From 8f3a56e2aa29ccfe32575cb77fa03e59300fd09c Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Wed, 11 Sep 2024 10:30:49 +0000 Subject: [PATCH] Release 0.0.1-beta10 --- README.md | 28 +- pyproject.toml | 2 +- reference.md | 5836 ++----------- src/gooey/__init__.py | 246 +- src/gooey/client.py | 7369 +++-------------- .../copilot_for_your_enterprise/__init__.py | 27 - .../copilot_for_your_enterprise/client.py | 741 -- .../types/__init__.py | 25 - src/gooey/core/client_wrapper.py | 2 +- src/gooey/errors/__init__.py | 3 +- src/gooey/errors/too_many_requests_error.py | 9 - src/gooey/evaluator/__init__.py | 5 - src/gooey/evaluator/client.py | 342 - src/gooey/evaluator/types/__init__.py | 6 - src/gooey/functions/__init__.py | 2 - src/gooey/functions/client.py | 231 - src/gooey/lip_syncing/__init__.py | 5 - src/gooey/lip_syncing/client.py | 296 - src/gooey/lip_syncing/types/__init__.py | 5 - src/gooey/smart_gpt/__init__.py | 5 - src/gooey/smart_gpt/client.py | 324 - src/gooey/smart_gpt/types/__init__.py | 6 - src/gooey/types/__init__.py | 256 + src/gooey/types/asr_page_request.py | 43 + .../types/asr_page_request_functions_item.py | 24 + ...asr_page_request_functions_item_trigger.py | 5 + src/gooey/types/bulk_eval_page_request.py | 56 + ...lk_eval_page_request_agg_functions_item.py | 21 + ...age_request_agg_functions_item_function.py | 25 + ...ulk_eval_page_request_eval_prompts_item.py | 20 + .../bulk_eval_page_request_functions_item.py | 24 + ...val_page_request_functions_item_trigger.py | 5 + ..._eval_page_request_response_format_type.py | 0 .../bulk_eval_page_request_selected_model.py | 0 src/gooey/types/bulk_runner_page_request.py | 55 + ...bulk_runner_page_request_functions_item.py | 24 + ...ner_page_request_functions_item_trigger.py | 5 + src/gooey/types/chyron_plant_page_request.py | 4 +- ...hyron_plant_page_request_functions_item.py | 24 + ...ant_page_request_functions_item_trigger.py | 5 + 
src/gooey/types/compare_llm_page_request.py | 37 + ...compare_llm_page_request_functions_item.py | 24 + ...llm_page_request_functions_item_trigger.py | 5 + .../types/compare_text2img_page_request.py | 44 + ...re_text2img_page_request_functions_item.py | 24 + ...img_page_request_functions_item_trigger.py | 5 + .../types/compare_upscaler_page_request.py | 37 + ...re_upscaler_page_request_functions_item.py | 24 + ...ler_page_request_functions_item_trigger.py | 5 + src/gooey/types/deforum_sd_page_request.py | 41 + ..._sd_page_request_animation_prompts_item.py | 20 + .../deforum_sd_page_request_functions_item.py | 24 + ..._sd_page_request_functions_item_trigger.py | 5 + src/gooey/types/doc_extract_page_request.py | 43 + ...doc_extract_page_request_functions_item.py | 24 + ...act_page_request_functions_item_trigger.py | 5 + src/gooey/types/doc_search_page_request.py | 56 + .../doc_search_page_request_functions_item.py | 24 + ...rch_page_request_functions_item_trigger.py | 5 + src/gooey/types/doc_summary_page_request.py | 43 + ...doc_summary_page_request_functions_item.py | 24 + ...ary_page_request_functions_item_trigger.py | 5 + .../email_face_inpainting_page_request.py | 51 + ..._inpainting_page_request_functions_item.py | 26 + ...ing_page_request_functions_item_trigger.py | 5 + src/gooey/types/embeddings_page_request.py | 30 + .../embeddings_page_request_functions_item.py | 24 + ...ngs_page_request_functions_item_trigger.py | 5 + .../types/face_inpainting_page_request.py | 42 + ..._inpainting_page_request_functions_item.py | 24 + ...ing_page_request_functions_item_trigger.py | 5 + src/gooey/types/functions_page_request.py | 30 + src/gooey/types/google_gpt_page_request.py | 66 + .../google_gpt_page_request_functions_item.py | 24 + ...gpt_page_request_functions_item_trigger.py | 5 + .../types/google_image_gen_page_request.py | 46 + ...e_image_gen_page_request_functions_item.py | 24 + ...gen_page_request_functions_item_trigger.py | 5 + 
.../types/image_segmentation_page_request.py | 36 + ...egmentation_page_request_functions_item.py | 24 + ...ion_page_request_functions_item_trigger.py | 5 + src/gooey/types/img2img_page_request.py | 43 + .../img2img_page_request_functions_item.py | 24 + ...img_page_request_functions_item_trigger.py | 5 + src/gooey/types/letter_writer_page_request.py | 8 +- ...riter_page_request_example_letters_item.py | 20 + ...tter_writer_page_request_functions_item.py | 24 + ...ter_page_request_functions_item_trigger.py | 5 + src/gooey/types/lipsync_page_request.py | 37 + .../lipsync_page_request_functions_item.py | 24 + ...ync_page_request_functions_item_trigger.py | 5 + ...lipsync_page_request_sadtalker_settings.py | 40 + ...e_request_sadtalker_settings_preprocess.py | 7 + .../lipsync_page_request_selected_model.py | 0 src/gooey/types/lipsync_tts_page_request.py | 62 + ...lipsync_tts_page_request_functions_item.py | 24 + ...tts_page_request_functions_item_trigger.py | 5 + ...ync_tts_page_request_sadtalker_settings.py | 40 + ...e_request_sadtalker_settings_preprocess.py | 7 + .../types/object_inpainting_page_request.py | 43 + ..._inpainting_page_request_functions_item.py | 24 + ...ing_page_request_functions_item_trigger.py | 5 + .../types/qr_code_generator_page_request.py | 66 + ...e_generator_page_request_functions_item.py | 24 + ...tor_page_request_functions_item_trigger.py | 5 + ...de_generator_page_request_qr_code_vcard.py | 44 + src/gooey/types/recipe_function.py | 8 +- .../types/related_qn_a_doc_page_request.py | 70 + ...ed_qn_a_doc_page_request_functions_item.py | 24 + ...doc_page_request_functions_item_trigger.py | 5 + src/gooey/types/related_qn_a_page_request.py | 66 + ...elated_qn_a_page_request_functions_item.py | 24 + ...n_a_page_request_functions_item_trigger.py | 5 + src/gooey/types/sad_talker_settings.py | 12 +- src/gooey/types/seo_summary_page_request.py | 52 + src/gooey/types/smart_gpt_page_request.py | 40 + .../smart_gpt_page_request_functions_item.py | 24 + 
...gpt_page_request_functions_item_trigger.py | 5 + ...t_gpt_page_request_response_format_type.py | 0 .../smart_gpt_page_request_selected_model.py | 0 .../types/social_lookup_email_page_request.py | 38 + ...ookup_email_page_request_functions_item.py | 24 + ...ail_page_request_functions_item_trigger.py | 5 + src/gooey/types/text2audio_page_request.py | 36 + .../text2audio_page_request_functions_item.py | 24 + ...dio_page_request_functions_item_trigger.py | 5 + .../types/text_to_speech_page_request.py | 53 + ...t_to_speech_page_request_functions_item.py | 24 + ...ech_page_request_functions_item_trigger.py | 5 + src/gooey/types/translation_page_request.py | 33 + ...translation_page_request_functions_item.py | 24 + ...ion_page_request_functions_item_trigger.py | 5 + src/gooey/types/video_bots_page_request.py | 130 + .../video_bots_page_request_asr_model.py | 0 .../video_bots_page_request_citation_style.py | 0 ...video_bots_page_request_embedding_model.py | 0 .../video_bots_page_request_functions_item.py | 24 + ...ots_page_request_functions_item_trigger.py | 5 + .../video_bots_page_request_lipsync_model.py | 0 .../video_bots_page_request_messages_item.py | 23 + ...bots_page_request_messages_item_content.py | 6 + ...page_request_messages_item_content_item.py | 41 + ...eo_bots_page_request_messages_item_role.py | 5 + ...ideo_bots_page_request_openai_tts_model.py | 0 ...deo_bots_page_request_openai_voice_name.py | 0 ..._bots_page_request_response_format_type.py | 0 ...eo_bots_page_request_sadtalker_settings.py | 40 + ...e_request_sadtalker_settings_preprocess.py | 7 + .../video_bots_page_request_selected_model.py | 0 ...deo_bots_page_request_translation_model.py | 0 .../video_bots_page_request_tts_provider.py | 0 151 files changed, 4816 insertions(+), 13779 deletions(-) delete mode 100644 src/gooey/copilot_for_your_enterprise/__init__.py delete mode 100644 src/gooey/copilot_for_your_enterprise/client.py delete mode 100644 
src/gooey/copilot_for_your_enterprise/types/__init__.py delete mode 100644 src/gooey/errors/too_many_requests_error.py delete mode 100644 src/gooey/evaluator/__init__.py delete mode 100644 src/gooey/evaluator/client.py delete mode 100644 src/gooey/evaluator/types/__init__.py delete mode 100644 src/gooey/functions/__init__.py delete mode 100644 src/gooey/functions/client.py delete mode 100644 src/gooey/lip_syncing/__init__.py delete mode 100644 src/gooey/lip_syncing/client.py delete mode 100644 src/gooey/lip_syncing/types/__init__.py delete mode 100644 src/gooey/smart_gpt/__init__.py delete mode 100644 src/gooey/smart_gpt/client.py delete mode 100644 src/gooey/smart_gpt/types/__init__.py create mode 100644 src/gooey/types/asr_page_request.py create mode 100644 src/gooey/types/asr_page_request_functions_item.py create mode 100644 src/gooey/types/asr_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/bulk_eval_page_request.py create mode 100644 src/gooey/types/bulk_eval_page_request_agg_functions_item.py create mode 100644 src/gooey/types/bulk_eval_page_request_agg_functions_item_function.py create mode 100644 src/gooey/types/bulk_eval_page_request_eval_prompts_item.py create mode 100644 src/gooey/types/bulk_eval_page_request_functions_item.py create mode 100644 src/gooey/types/bulk_eval_page_request_functions_item_trigger.py rename src/gooey/{evaluator => }/types/bulk_eval_page_request_response_format_type.py (100%) rename src/gooey/{evaluator => }/types/bulk_eval_page_request_selected_model.py (100%) create mode 100644 src/gooey/types/bulk_runner_page_request.py create mode 100644 src/gooey/types/bulk_runner_page_request_functions_item.py create mode 100644 src/gooey/types/bulk_runner_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/chyron_plant_page_request_functions_item.py create mode 100644 src/gooey/types/chyron_plant_page_request_functions_item_trigger.py create mode 100644 
src/gooey/types/compare_llm_page_request.py create mode 100644 src/gooey/types/compare_llm_page_request_functions_item.py create mode 100644 src/gooey/types/compare_llm_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/compare_text2img_page_request.py create mode 100644 src/gooey/types/compare_text2img_page_request_functions_item.py create mode 100644 src/gooey/types/compare_text2img_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/compare_upscaler_page_request.py create mode 100644 src/gooey/types/compare_upscaler_page_request_functions_item.py create mode 100644 src/gooey/types/compare_upscaler_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/deforum_sd_page_request.py create mode 100644 src/gooey/types/deforum_sd_page_request_animation_prompts_item.py create mode 100644 src/gooey/types/deforum_sd_page_request_functions_item.py create mode 100644 src/gooey/types/deforum_sd_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/doc_extract_page_request.py create mode 100644 src/gooey/types/doc_extract_page_request_functions_item.py create mode 100644 src/gooey/types/doc_extract_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/doc_search_page_request.py create mode 100644 src/gooey/types/doc_search_page_request_functions_item.py create mode 100644 src/gooey/types/doc_search_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/doc_summary_page_request.py create mode 100644 src/gooey/types/doc_summary_page_request_functions_item.py create mode 100644 src/gooey/types/doc_summary_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/email_face_inpainting_page_request.py create mode 100644 src/gooey/types/email_face_inpainting_page_request_functions_item.py create mode 100644 src/gooey/types/email_face_inpainting_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/embeddings_page_request.py 
create mode 100644 src/gooey/types/embeddings_page_request_functions_item.py create mode 100644 src/gooey/types/embeddings_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/face_inpainting_page_request.py create mode 100644 src/gooey/types/face_inpainting_page_request_functions_item.py create mode 100644 src/gooey/types/face_inpainting_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/functions_page_request.py create mode 100644 src/gooey/types/google_gpt_page_request.py create mode 100644 src/gooey/types/google_gpt_page_request_functions_item.py create mode 100644 src/gooey/types/google_gpt_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/google_image_gen_page_request.py create mode 100644 src/gooey/types/google_image_gen_page_request_functions_item.py create mode 100644 src/gooey/types/google_image_gen_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/image_segmentation_page_request.py create mode 100644 src/gooey/types/image_segmentation_page_request_functions_item.py create mode 100644 src/gooey/types/image_segmentation_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/img2img_page_request.py create mode 100644 src/gooey/types/img2img_page_request_functions_item.py create mode 100644 src/gooey/types/img2img_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/letter_writer_page_request_example_letters_item.py create mode 100644 src/gooey/types/letter_writer_page_request_functions_item.py create mode 100644 src/gooey/types/letter_writer_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/lipsync_page_request.py create mode 100644 src/gooey/types/lipsync_page_request_functions_item.py create mode 100644 src/gooey/types/lipsync_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/lipsync_page_request_sadtalker_settings.py create mode 100644 
src/gooey/types/lipsync_page_request_sadtalker_settings_preprocess.py rename src/gooey/{lip_syncing => }/types/lipsync_page_request_selected_model.py (100%) create mode 100644 src/gooey/types/lipsync_tts_page_request.py create mode 100644 src/gooey/types/lipsync_tts_page_request_functions_item.py create mode 100644 src/gooey/types/lipsync_tts_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/lipsync_tts_page_request_sadtalker_settings.py create mode 100644 src/gooey/types/lipsync_tts_page_request_sadtalker_settings_preprocess.py create mode 100644 src/gooey/types/object_inpainting_page_request.py create mode 100644 src/gooey/types/object_inpainting_page_request_functions_item.py create mode 100644 src/gooey/types/object_inpainting_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/qr_code_generator_page_request.py create mode 100644 src/gooey/types/qr_code_generator_page_request_functions_item.py create mode 100644 src/gooey/types/qr_code_generator_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/qr_code_generator_page_request_qr_code_vcard.py create mode 100644 src/gooey/types/related_qn_a_doc_page_request.py create mode 100644 src/gooey/types/related_qn_a_doc_page_request_functions_item.py create mode 100644 src/gooey/types/related_qn_a_doc_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/related_qn_a_page_request.py create mode 100644 src/gooey/types/related_qn_a_page_request_functions_item.py create mode 100644 src/gooey/types/related_qn_a_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/seo_summary_page_request.py create mode 100644 src/gooey/types/smart_gpt_page_request.py create mode 100644 src/gooey/types/smart_gpt_page_request_functions_item.py create mode 100644 src/gooey/types/smart_gpt_page_request_functions_item_trigger.py rename src/gooey/{smart_gpt => }/types/smart_gpt_page_request_response_format_type.py (100%) rename 
src/gooey/{smart_gpt => }/types/smart_gpt_page_request_selected_model.py (100%) create mode 100644 src/gooey/types/social_lookup_email_page_request.py create mode 100644 src/gooey/types/social_lookup_email_page_request_functions_item.py create mode 100644 src/gooey/types/social_lookup_email_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/text2audio_page_request.py create mode 100644 src/gooey/types/text2audio_page_request_functions_item.py create mode 100644 src/gooey/types/text2audio_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/text_to_speech_page_request.py create mode 100644 src/gooey/types/text_to_speech_page_request_functions_item.py create mode 100644 src/gooey/types/text_to_speech_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/translation_page_request.py create mode 100644 src/gooey/types/translation_page_request_functions_item.py create mode 100644 src/gooey/types/translation_page_request_functions_item_trigger.py create mode 100644 src/gooey/types/video_bots_page_request.py rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_asr_model.py (100%) rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_citation_style.py (100%) rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_embedding_model.py (100%) create mode 100644 src/gooey/types/video_bots_page_request_functions_item.py create mode 100644 src/gooey/types/video_bots_page_request_functions_item_trigger.py rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_lipsync_model.py (100%) create mode 100644 src/gooey/types/video_bots_page_request_messages_item.py create mode 100644 src/gooey/types/video_bots_page_request_messages_item_content.py create mode 100644 src/gooey/types/video_bots_page_request_messages_item_content_item.py create mode 100644 src/gooey/types/video_bots_page_request_messages_item_role.py rename 
src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_openai_tts_model.py (100%) rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_openai_voice_name.py (100%) rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_response_format_type.py (100%) create mode 100644 src/gooey/types/video_bots_page_request_sadtalker_settings.py create mode 100644 src/gooey/types/video_bots_page_request_sadtalker_settings_preprocess.py rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_selected_model.py (100%) rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_translation_model.py (100%) rename src/gooey/{copilot_for_your_enterprise => }/types/video_bots_page_request_tts_provider.py (100%) diff --git a/README.md b/README.md index 278b73e..19ff2e0 100644 --- a/README.md +++ b/README.md @@ -16,19 +16,12 @@ pip install gooeyai Instantiate and use the client with the following: ```python -from gooey import AnimationPrompt, Gooey +from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.animate( - animation_prompts=[ - AnimationPrompt( - frame="frame", - prompt="prompt", - ) - ], -) +client.post_v3video_bots_async() ``` ## Async Client @@ -38,7 +31,7 @@ The SDK also exports an `async` client so that you can make non-blocking calls t ```python import asyncio -from gooey import AnimationPrompt, AsyncGooey +from gooey import AsyncGooey client = AsyncGooey( api_key="YOUR_API_KEY", @@ -46,14 +39,7 @@ client = AsyncGooey( async def main() -> None: - await client.animate( - animation_prompts=[ - AnimationPrompt( - frame="frame", - prompt="prompt", - ) - ], - ) + await client.post_v3video_bots_async() asyncio.run(main()) @@ -68,7 +54,7 @@ will be thrown. from gooey.core.api_error import ApiError try: - client.animate(...) + client.post_v3video_bots_async(...) 
except ApiError as e: print(e.status_code) print(e.body) @@ -91,7 +77,7 @@ A request is deemed retriable when any of the following HTTP status codes is ret Use the `max_retries` request option to configure this behavior. ```python -client.animate(..., { +client.post_v3video_bots_async(..., { "max_retries": 1 }) ``` @@ -111,7 +97,7 @@ client = Gooey( # Override timeout for a specific method -client.animate(..., { +client.post_v3video_bots_async(..., { "timeout_in_seconds": 1 }) ``` diff --git a/pyproject.toml b/pyproject.toml index d698064..cccb889 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "gooeyai" -version = "0.0.1-beta9" +version = "0.0.1-beta10" description = "" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index 994d108..1b4756d 100644 --- a/reference.md +++ b/reference.md @@ -1,5 +1,5 @@ # Reference -
client.animate(...) +
client.post_v3video_bots_async()
@@ -12,19 +12,12 @@
```python -from gooey import AnimationPrompt, Gooey +from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.animate( - animation_prompts=[ - AnimationPrompt( - frame="frame", - prompt="prompt", - ) - ], -) +client.post_v3video_bots_async() ```
@@ -40,131 +33,95 @@ client.animate(
-**animation_prompts:** `typing.Sequence[AnimationPrompt]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**example_id:** `typing.Optional[str]` -
-
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` -
+
+
client.post_v3deforum_sd_async()
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
+#### 🔌 Usage
-**max_frames:** `typing.Optional[int]` - -
-
-
-**selected_model:** `typing.Optional[DeforumSdPageRequestSelectedModel]` - -
-
+```python +from gooey import Gooey -
-
+client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3deforum_sd_async() -**animation_mode:** `typing.Optional[str]` - +```
- -
-
- -**zoom:** `typing.Optional[str]` -
+#### ⚙️ Parameters +
-**translation_x:** `typing.Optional[str]` - -
-
-
-**translation_y:** `typing.Optional[str]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**rotation3d_x:** `typing.Optional[str]` -
-
-
-**rotation3d_y:** `typing.Optional[str]` -
+
+
client.post_v3art_qr_code_async()
-**rotation3d_z:** `typing.Optional[str]` - -
-
+#### 🔌 Usage
-**fps:** `typing.Optional[int]` - -
-
-
-**seed:** `typing.Optional[int]` - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3art_qr_code_async() + +``` +
+
+#### ⚙️ Parameters +
-**settings:** `typing.Optional[RunSettings]` - -
-
-
@@ -180,7 +137,7 @@ client.animate(
-
client.qr_code(...) +
client.post_v3related_qna_maker_async()
@@ -198,9 +155,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.qr_code( - text_prompt="text_prompt", -) +client.post_v3related_qna_maker_async() ```
@@ -216,247 +171,233 @@ client.qr_code(
-**text_prompt:** `str` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ +
-
-
-**example_id:** `typing.Optional[str]` -
+
+
client.post_v3seo_summary_async()
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
+#### 🔌 Usage
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
-
-**qr_code_data:** `typing.Optional[str]` - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3seo_summary_async() + +``` +
+
+#### ⚙️ Parameters +
-**qr_code_input_image:** `typing.Optional[str]` - -
-
-
-**qr_code_vcard:** `typing.Optional[Vcard]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ + -
-
-**qr_code_file:** `typing.Optional[str]` -
+
+
client.post_v3google_gpt_async()
-**use_url_shortener:** `typing.Optional[bool]` - -
-
+#### 🔌 Usage
-**negative_prompt:** `typing.Optional[str]` - -
-
-
-**image_prompt:** `typing.Optional[str]` - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3google_gpt_async() + +``` +
+
+#### ⚙️ Parameters +
-**image_prompt_controlnet_models:** `typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] -]` - -
-
-
-**image_prompt_strength:** `typing.Optional[float]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ + -
-
-**image_prompt_scale:** `typing.Optional[float]` -
+
+
client.post_v3social_lookup_email_async()
-**image_prompt_pos_x:** `typing.Optional[float]` - -
-
+#### 🔌 Usage
-**image_prompt_pos_y:** `typing.Optional[float]` - -
-
-
-**selected_model:** `typing.Optional[QrCodeGeneratorPageRequestSelectedModel]` - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3social_lookup_email_async() + +``` +
+
+#### ⚙️ Parameters +
-**selected_controlnet_model:** `typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem] -]` - -
-
-
-**output_width:** `typing.Optional[int]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ + -
-
-**output_height:** `typing.Optional[int]` -
+
+
client.post_v3bulk_runner_async()
-**guidance_scale:** `typing.Optional[float]` - -
-
+#### 🔌 Usage
-**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]` - -
-
-
-**num_outputs:** `typing.Optional[int]` - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3bulk_runner_async() + +``` +
+
+#### ⚙️ Parameters +
-**quality:** `typing.Optional[int]` - -
-
-
-**scheduler:** `typing.Optional[QrCodeGeneratorPageRequestScheduler]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ + -
-
-**seed:** `typing.Optional[int]` -
+
+
client.post_v3bulk_eval_async()
-**obj_scale:** `typing.Optional[float]` - -
-
+#### 🔌 Usage
-**obj_pos_x:** `typing.Optional[float]` - -
-
-
-**obj_pos_y:** `typing.Optional[float]` - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3bulk_eval_async() + +``` +
+
+#### ⚙️ Parameters +
-**settings:** `typing.Optional[RunSettings]` - -
-
-
@@ -472,7 +413,7 @@ client.qr_code(
-
client.seo_people_also_ask(...) +
client.post_v3doc_extract_async()
@@ -490,10 +431,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.seo_people_also_ask( - search_query="search_query", - site_filter="site_filter", -) +client.post_v3doc_extract_async() ```
@@ -509,208 +447,141 @@ client.seo_people_also_ask(
-**search_query:** `str` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ +
-
-
-**site_filter:** `str` -
+
+
client.post_v3compare_llm_async()
-**example_id:** `typing.Optional[str]` - -
-
+#### 🔌 Usage
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
+```python +from gooey import Gooey -
-
+client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3compare_llm_async() -**task_instructions:** `typing.Optional[str]` - +```
- -
-
- -**query_instructions:** `typing.Optional[str]` -
+#### ⚙️ Parameters +
-**selected_model:** `typing.Optional[RelatedQnAPageRequestSelectedModel]` - -
-
-
-**max_search_urls:** `typing.Optional[int]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**max_references:** `typing.Optional[int]` -
-
-
-**max_context_words:** `typing.Optional[int]` -
+
+
client.post_v3doc_search_async()
-**scroll_jump:** `typing.Optional[int]` - -
-
+#### 🔌 Usage
-**embedding_model:** `typing.Optional[RelatedQnAPageRequestEmbeddingModel]` - -
-
-
-**dense_weight:** `typing.Optional[float]` +```python +from gooey import Gooey +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3doc_search_async() -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - +```
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` -
+#### ⚙️ Parameters +
-**num_outputs:** `typing.Optional[int]` - -
-
-
-**quality:** `typing.Optional[float]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**max_tokens:** `typing.Optional[int]` -
-
-
-**sampling_temperature:** `typing.Optional[float]` -
+
+
client.post_v3smart_gpt_async()
-**response_format_type:** `typing.Optional[RelatedQnAPageRequestResponseFormatType]` - -
-
+#### 🔌 Usage
-**serp_search_location:** `typing.Optional[SerpSearchLocation]` - -
-
-
-**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead - -
-
+```python +from gooey import Gooey -
-
+client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3smart_gpt_async() -**serp_search_type:** `typing.Optional[SerpSearchType]` - +```
- -
-
- -**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead -
+#### ⚙️ Parameters +
-**settings:** `typing.Optional[RunSettings]` - -
-
-
@@ -726,7 +597,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
client.seo_content(...) +
client.post_v3doc_summary_async()
@@ -744,12 +615,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.seo_content( - search_query="search_query", - keywords="keywords", - title="title", - company_url="company_url", -) +client.post_v3doc_summary_async() ```
@@ -765,179 +631,141 @@ client.seo_content(
-**search_query:** `str` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**keywords:** `str` -
-
-
-**title:** `str` -
+
+
client.post_v3functions_async()
-**company_url:** `str` - -
-
+#### 🔌 Usage
-**example_id:** `typing.Optional[str]` - -
-
-
-**task_instructions:** `typing.Optional[str]` - -
-
+```python +from gooey import Gooey -
-
+client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3functions_async() -**enable_html:** `typing.Optional[bool]` - +```
- -
-
- -**selected_model:** `typing.Optional[SeoSummaryPageRequestSelectedModel]` -
+#### ⚙️ Parameters +
-**max_search_urls:** `typing.Optional[int]` - -
-
-
-**enable_crosslinks:** `typing.Optional[bool]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**seed:** `typing.Optional[int]` -
-
-
-**avoid_repetition:** `typing.Optional[bool]` -
+
+
client.post_v3lipsync_async()
-**num_outputs:** `typing.Optional[int]` - -
-
+#### 🔌 Usage
-**quality:** `typing.Optional[float]` - -
-
-
-**max_tokens:** `typing.Optional[int]` - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3lipsync_async() + +``` +
+
+#### ⚙️ Parameters +
-**sampling_temperature:** `typing.Optional[float]` - -
-
-
-**response_format_type:** `typing.Optional[SeoSummaryPageRequestResponseFormatType]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ + -
-
-**serp_search_location:** `typing.Optional[SerpSearchLocation]` -
+
+
client.post_v3lipsync_tts_async()
-**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead - -
-
+#### 🔌 Usage
-**serp_search_type:** `typing.Optional[SerpSearchType]` - -
-
-
-**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3lipsync_tts_async() + +```
+ + + +#### ⚙️ Parameters
-**settings:** `typing.Optional[RunSettings]` - -
-
-
@@ -953,7 +781,7 @@ client.seo_content(
-
client.web_search_llm(...) +
client.post_v3text_to_speech_async()
@@ -971,10 +799,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.web_search_llm( - search_query="search_query", - site_filter="site_filter", -) +client.post_v3text_to_speech_async() ```
@@ -990,208 +815,141 @@ client.web_search_llm(
-**search_query:** `str` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ +
-
-
-**site_filter:** `str` -
+
+
client.post_v3asr_async()
-**example_id:** `typing.Optional[str]` - -
-
+#### 🔌 Usage
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
-
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
+```python +from gooey import Gooey -
-
+client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3asr_async() -**task_instructions:** `typing.Optional[str]` - +```
- -
-
- -**query_instructions:** `typing.Optional[str]` -
+#### ⚙️ Parameters +
-**selected_model:** `typing.Optional[GoogleGptPageRequestSelectedModel]` - -
-
-
-**max_search_urls:** `typing.Optional[int]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**max_references:** `typing.Optional[int]` -
-
-
-**max_context_words:** `typing.Optional[int]` -
+
+
client.post_v3text2audio_async()
-**scroll_jump:** `typing.Optional[int]` - -
-
+#### 🔌 Usage
-**embedding_model:** `typing.Optional[GoogleGptPageRequestEmbeddingModel]` - -
-
-
-**dense_weight:** `typing.Optional[float]` +```python +from gooey import Gooey +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3text2audio_async() -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - +```
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` -
+#### ⚙️ Parameters +
-**num_outputs:** `typing.Optional[int]` - -
-
-
-**quality:** `typing.Optional[float]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**max_tokens:** `typing.Optional[int]` -
-
-
-**sampling_temperature:** `typing.Optional[float]` -
+
+
client.post_v3translate_async()
-**response_format_type:** `typing.Optional[GoogleGptPageRequestResponseFormatType]` - -
-
+#### 🔌 Usage
-**serp_search_location:** `typing.Optional[SerpSearchLocation]` - -
-
-
-**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead - -
-
+```python +from gooey import Gooey -
-
+client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3translate_async() -**serp_search_type:** `typing.Optional[SerpSearchType]` - +```
- -
-
- -**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead -
+#### ⚙️ Parameters +
-**settings:** `typing.Optional[RunSettings]` - -
-
-
@@ -1207,7 +965,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
client.personalize_email(...) +
client.post_v3img2img_async()
@@ -1225,9 +983,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.personalize_email( - email_address="email_address", -) +client.post_v3img2img_async() ```
@@ -1243,107 +999,95 @@ client.personalize_email(
-**email_address:** `str` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**example_id:** `typing.Optional[str]` -
-
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` -
+
+
client.post_v3compare_text2img_async()
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
+#### 🔌 Usage
-**input_prompt:** `typing.Optional[str]` - -
-
-
-**selected_model:** `typing.Optional[SocialLookupEmailPageRequestSelectedModel]` - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3compare_text2img_async() + +``` +
+
+#### ⚙️ Parameters +
-**avoid_repetition:** `typing.Optional[bool]` - -
-
-
-**num_outputs:** `typing.Optional[int]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ + -
-
-**quality:** `typing.Optional[float]` -
+
+
client.post_v3object_inpainting_async()
-**max_tokens:** `typing.Optional[int]` - -
-
+#### 🔌 Usage
-**sampling_temperature:** `typing.Optional[float]` - -
-
-
-**response_format_type:** `typing.Optional[SocialLookupEmailPageRequestResponseFormatType]` - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3object_inpainting_async() + +``` +
+
+#### ⚙️ Parameters +
-**settings:** `typing.Optional[RunSettings]` - -
-
-
@@ -1359,7 +1103,7 @@ client.personalize_email(
-
client.bulk_run(...) +
client.post_v3face_inpainting_async()
@@ -1377,12 +1121,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.bulk_run( - documents=["documents"], - run_urls=["run_urls"], - input_columns={"key": "value"}, - output_columns={"key": "value"}, -) +client.post_v3face_inpainting_async() ```
@@ -1398,98 +1137,49 @@ client.bulk_run(
-**documents:** `typing.Sequence[str]` - - -Upload or link to a CSV or google sheet that contains your sample input data. -For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. -Remember to includes header names in your CSV too. - +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**run_urls:** `typing.Sequence[str]` - - -Provide one or more Gooey.AI workflow runs. -You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. - -
-
-
- -**input_columns:** `typing.Dict[str, str]` - -For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. - -
+
+
client.post_v3email_face_inpainting_async()
-**output_columns:** `typing.Dict[str, str]` - - -For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. - - -
-
+#### 🔌 Usage
-**example_id:** `typing.Optional[str]` - -
-
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
+```python +from gooey import Gooey -
-
+client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3email_face_inpainting_async() -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - +```
- -
-
- -**eval_urls:** `typing.Optional[typing.Sequence[str]]` - - -_(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. - -
+#### ⚙️ Parameters +
-**settings:** `typing.Optional[RunSettings]` - -
-
-
@@ -1505,7 +1195,7 @@ _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the result
-
client.synthesize_data(...) +
client.post_v3google_image_gen_async()
@@ -1523,9 +1213,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.synthesize_data( - documents=["documents"], -) +client.post_v3google_image_gen_async() ```
@@ -1541,142 +1229,95 @@ client.synthesize_data(
-**documents:** `typing.Sequence[str]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**example_id:** `typing.Optional[str]` -
-
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` -
+
+
client.post_v3image_segmentation_async()
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
+#### 🔌 Usage
-**sheet_url:** `typing.Optional[str]` - -
-
-
-**selected_asr_model:** `typing.Optional[DocExtractPageRequestSelectedAsrModel]` - -
-
+```python +from gooey import Gooey -
-
+client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3image_segmentation_async() -**google_translate_target:** `typing.Optional[str]` - +```
- -
-
- -**glossary_document:** `typing.Optional[str]` - -Provide a glossary to customize translation and improve accuracy of domain-specific terms. -If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). -
+#### ⚙️ Parameters +
-**task_instructions:** `typing.Optional[str]` - -
-
-
-**selected_model:** `typing.Optional[DocExtractPageRequestSelectedModel]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` -
-
-
-**num_outputs:** `typing.Optional[int]` -
+
+
client.post_v3compare_ai_upscalers_async()
-**quality:** `typing.Optional[float]` - -
-
+#### 🔌 Usage
-**max_tokens:** `typing.Optional[int]` - -
-
-
-**sampling_temperature:** `typing.Optional[float]` - -
-
+```python +from gooey import Gooey -
-
+client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3compare_ai_upscalers_async() -**response_format_type:** `typing.Optional[DocExtractPageRequestResponseFormatType]` - +```
+ + + +#### ⚙️ Parameters
-**settings:** `typing.Optional[RunSettings]` - -
-
-
@@ -1692,7 +1333,7 @@ If not specified or invalid, no glossary will be used. Read about the expected f
-
client.llm(...) +
client.post_v3chyron_plant_async()
@@ -1710,7 +1351,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.llm() +client.post_v3chyron_plant_async() ```
@@ -1726,99 +1367,95 @@ client.llm()
-**example_id:** `typing.Optional[str]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ +
-
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` -
+
+
client.post_v3letter_writer_async()
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
+#### 🔌 Usage
-**input_prompt:** `typing.Optional[str]` - -
-
-
-**selected_models:** `typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]]` - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3letter_writer_async() + +```
+ + + +#### ⚙️ Parameters
-**avoid_repetition:** `typing.Optional[bool]` - -
-
-
-**num_outputs:** `typing.Optional[int]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ + -
-
-**quality:** `typing.Optional[float]` -
+
+
client.post_v3embeddings_async()
-**max_tokens:** `typing.Optional[int]` - -
-
+#### 🔌 Usage
-**sampling_temperature:** `typing.Optional[float]` - -
-
-
-**response_format_type:** `typing.Optional[CompareLlmPageRequestResponseFormatType]` - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.post_v3embeddings_async() + +``` +
+
+#### ⚙️ Parameters +
-**settings:** `typing.Optional[RunSettings]` - -
-
-
@@ -1834,7 +1471,7 @@ client.llm()
-
client.rag(...) +
client.post_v3related_qna_maker_doc_async()
@@ -1852,9 +1489,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.rag( - search_query="search_query", -) +client.post_v3related_qna_maker_doc_async() ```
@@ -1870,116 +1505,102 @@ client.rag(
-**search_query:** `str` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
- -
-
- -**example_id:** `typing.Optional[str]` -
-
-
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` -
+
+
client.health_status_get()
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
+#### 🔌 Usage
-**keyword_query:** `typing.Optional[DocSearchPageRequestKeywordQuery]` - -
-
-
-**documents:** `typing.Optional[typing.Sequence[str]]` - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.health_status_get() + +``` +
+
+#### ⚙️ Parameters +
-**max_references:** `typing.Optional[int]` - -
-
-
-**max_context_words:** `typing.Optional[int]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ + -
-
-**scroll_jump:** `typing.Optional[int]` -
+
+## CopilotIntegrations +
client.copilot_integrations.video_bots_stream_create(...)
-**doc_extract_url:** `typing.Optional[str]` - -
-
+#### 🔌 Usage
-**embedding_model:** `typing.Optional[DocSearchPageRequestEmbeddingModel]` - -
-
-
-**dense_weight:** `typing.Optional[float]` +```python +from gooey import Gooey +client = Gooey( + api_key="YOUR_API_KEY", +) +client.copilot_integrations.video_bots_stream_create( + integration_id="integration_id", +) -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - +``` +
+
+#### ⚙️ Parameters +
-**task_instructions:** `typing.Optional[str]` - -
-
-
-**query_instructions:** `typing.Optional[str]` +**integration_id:** `str` — Your Integration ID as shown in the Copilot Integrations tab
@@ -1987,7 +1608,13 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**selected_model:** `typing.Optional[DocSearchPageRequestSelectedModel]` +**conversation_id:** `typing.Optional[str]` + +The gooey conversation ID. + +If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests. + +Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response.
@@ -1995,4290 +1622,11 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**citation_style:** `typing.Optional[DocSearchPageRequestCitationStyle]` - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**response_format_type:** `typing.Optional[DocSearchPageRequestResponseFormatType]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
- - - - - - -
- -
client.doc_summary(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.doc_summary( - documents=["documents"], -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**documents:** `typing.Sequence[str]` - -
-
- -
-
- -**example_id:** `typing.Optional[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**task_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**merge_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[DocSummaryPageRequestSelectedModel]` - -
-
- -
-
- -**chain_type:** `typing.Optional[typing.Literal["map_reduce"]]` - -
-
- -
-
- -**selected_asr_model:** `typing.Optional[DocSummaryPageRequestSelectedAsrModel]` - -
-
- -
-
- -**google_translate_target:** `typing.Optional[str]` - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**response_format_type:** `typing.Optional[DocSummaryPageRequestResponseFormatType]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.lipsync_tts(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.lipsync_tts( - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**example_id:** `typing.Optional[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**tts_provider:** `typing.Optional[LipsyncTtsPageRequestTtsProvider]` - -
-
- -
-
- -**uberduck_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**uberduck_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**google_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_pitch:** `typing.Optional[float]` - -
-
- -
-
- -**bark_history_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead - -
-
- -
-
- -**elevenlabs_api_key:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_id:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_model:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_stability:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_similarity_boost:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_style:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_speaker_boost:** `typing.Optional[bool]` - -
-
- -
-
- -**azure_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**openai_voice_name:** `typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName]` - -
-
- -
-
- -**openai_tts_model:** `typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel]` - -
-
- -
-
- -**input_face:** `typing.Optional[str]` - -
-
- -
-
- -**face_padding_top:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_bottom:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_left:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_right:** `typing.Optional[int]` - -
-
- -
-
- -**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` - -
-
- -
-
- -**selected_model:** `typing.Optional[LipsyncTtsPageRequestSelectedModel]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.text_to_speech(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.text_to_speech( - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**example_id:** `typing.Optional[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**tts_provider:** `typing.Optional[TextToSpeechPageRequestTtsProvider]` - -
-
- -
-
- -**uberduck_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**uberduck_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**google_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_pitch:** `typing.Optional[float]` - -
-
- -
-
- -**bark_history_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead - -
-
- -
-
- -**elevenlabs_api_key:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_id:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_model:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_stability:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_similarity_boost:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_style:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_speaker_boost:** `typing.Optional[bool]` - -
-
- -
-
- -**azure_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**openai_voice_name:** `typing.Optional[TextToSpeechPageRequestOpenaiVoiceName]` - -
-
- -
-
- -**openai_tts_model:** `typing.Optional[TextToSpeechPageRequestOpenaiTtsModel]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.speech_recognition(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.speech_recognition( - documents=["documents"], -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**documents:** `typing.Sequence[str]` - -
-
- -
-
- -**example_id:** `typing.Optional[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**selected_model:** `typing.Optional[AsrPageRequestSelectedModel]` - -
-
- -
-
- -**language:** `typing.Optional[str]` - -
-
- -
-
- -**translation_model:** `typing.Optional[AsrPageRequestTranslationModel]` - -
-
- -
-
- -**output_format:** `typing.Optional[AsrPageRequestOutputFormat]` - -
-
- -
-
- -**google_translate_target:** `typing.Optional[str]` — use `translation_model` & `translation_target` instead. - -
-
- -
-
- -**translation_source:** `typing.Optional[str]` - -
-
- -
-
- -**translation_target:** `typing.Optional[str]` - -
-
- -
-
- -**glossary_document:** `typing.Optional[str]` - -Provide a glossary to customize translation and improve accuracy of domain-specific terms. -If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.text_to_music(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.text_to_music( - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**example_id:** `typing.Optional[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**duration_sec:** `typing.Optional[float]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**sd2upscaling:** `typing.Optional[bool]` - -
-
- -
-
- -**selected_models:** `typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.translate(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.translate() - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**example_id:** `typing.Optional[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**texts:** `typing.Optional[typing.Sequence[str]]` - -
-
- -
-
- -**selected_model:** `typing.Optional[TranslationPageRequestSelectedModel]` - -
-
- -
-
- -**translation_source:** `typing.Optional[str]` - -
-
- -
-
- -**translation_target:** `typing.Optional[str]` - -
-
- -
-
- -**glossary_document:** `typing.Optional[str]` - -Provide a glossary to customize translation and improve accuracy of domain-specific terms. -If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.remix_image(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.remix_image( - input_image="input_image", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**input_image:** `str` - -
-
- -
-
- -**example_id:** `typing.Optional[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**text_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[Img2ImgPageRequestSelectedModel]` - -
-
- -
-
- -**selected_controlnet_model:** `typing.Optional[Img2ImgPageRequestSelectedControlnetModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**prompt_strength:** `typing.Optional[float]` - -
-
- -
-
- -**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**image_guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.text_to_image(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.text_to_image( - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**example_id:** `typing.Optional[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**dall_e3quality:** `typing.Optional[str]` - -
-
- -
-
- -**dall_e3style:** `typing.Optional[str]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**sd2upscaling:** `typing.Optional[bool]` - -
-
- -
-
- -**selected_models:** `typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]` - -
-
- -
-
- -**scheduler:** `typing.Optional[CompareText2ImgPageRequestScheduler]` - -
-
- -
-
- -**edit_instruction:** `typing.Optional[str]` - -
-
- -
-
- -**image_guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.product_image(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.product_image( - input_image="input_image", - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**input_image:** `str` - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**example_id:** `typing.Optional[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**obj_scale:** `typing.Optional[float]` - -
-
- -
-
- -**obj_pos_x:** `typing.Optional[float]` - -
-
- -
-
- -**obj_pos_y:** `typing.Optional[float]` - -
-
- -
-
- -**mask_threshold:** `typing.Optional[float]` - -
-
- -
-
- -**selected_model:** `typing.Optional[ObjectInpaintingPageRequestSelectedModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**sd2upscaling:** `typing.Optional[bool]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.portrait(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.portrait( - input_image="input_image", - text_prompt="tony stark from the iron man", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**input_image:** `str` - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**example_id:** `typing.Optional[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**face_scale:** `typing.Optional[float]` - -
-
- -
-
- -**face_pos_x:** `typing.Optional[float]` - -
-
- -
-
- -**face_pos_y:** `typing.Optional[float]` - -
-
- -
-
- -**selected_model:** `typing.Optional[FaceInpaintingPageRequestSelectedModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**upscale_factor:** `typing.Optional[float]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.image_from_email(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.image_from_email( - email_address="sean@dara.network", - text_prompt="winter's day in paris", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**example_id:** `typing.Optional[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**email_address:** `typing.Optional[str]` - -
-
- -
-
- -**twitter_handle:** `typing.Optional[str]` - -
-
- -
-
- -**face_scale:** `typing.Optional[float]` - -
-
- -
-
- -**face_pos_x:** `typing.Optional[float]` - -
-
- -
-
- -**face_pos_y:** `typing.Optional[float]` - -
-
- -
-
- -**selected_model:** `typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**upscale_factor:** `typing.Optional[float]` - -
-
- -
-
- -**output_width:** `typing.Optional[int]` - -
-
- -
-
- -**output_height:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**should_send_email:** `typing.Optional[bool]` - -
-
- -
-
- -**email_from:** `typing.Optional[str]` - -
-
- -
-
- -**email_cc:** `typing.Optional[str]` - -
-
- -
-
- -**email_bcc:** `typing.Optional[str]` - -
-
- -
-
- -**email_subject:** `typing.Optional[str]` - -
-
- -
-
- -**email_body:** `typing.Optional[str]` - -
-
- -
-
- -**email_body_enable_html:** `typing.Optional[bool]` - -
-
- -
-
- -**fallback_email_body:** `typing.Optional[str]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.image_from_web_search(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.image_from_web_search( - search_query="search_query", - text_prompt="text_prompt", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**search_query:** `str` - -
-
- -
-
- -**text_prompt:** `str` - -
-
- -
-
- -**example_id:** `typing.Optional[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**serp_search_location:** `typing.Optional[SerpSearchLocation]` - -
-
- -
-
- -**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead - -
-
- -
-
- -**selected_model:** `typing.Optional[GoogleImageGenPageRequestSelectedModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[int]` - -
-
- -
-
- -**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**prompt_strength:** `typing.Optional[float]` - -
-
- -
-
- -**sd2upscaling:** `typing.Optional[bool]` - -
-
- -
-
- -**seed:** `typing.Optional[int]` - -
-
- -
-
- -**image_guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.remove_background(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.remove_background( - input_image="input_image", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**input_image:** `str` - -
-
- -
-
- -**example_id:** `typing.Optional[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**selected_model:** `typing.Optional[ImageSegmentationPageRequestSelectedModel]` - -
-
- -
-
- -**mask_threshold:** `typing.Optional[float]` - -
-
- -
-
- -**rect_persepective_transform:** `typing.Optional[bool]` - -
-
- -
-
- -**reflection_opacity:** `typing.Optional[float]` - -
-
- -
-
- -**obj_scale:** `typing.Optional[float]` - -
-
- -
-
- -**obj_pos_x:** `typing.Optional[float]` - -
-
- -
-
- -**obj_pos_y:** `typing.Optional[float]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.upscale(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.upscale( - scale=1, -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**scale:** `int` — The final upsampling scale of the image - -
-
- -
-
- -**example_id:** `typing.Optional[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**input_image:** `typing.Optional[str]` — Input Image - -
-
- -
-
- -**input_video:** `typing.Optional[str]` — Input Video - -
-
- -
-
- -**selected_models:** `typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]` - -
-
- -
-
- -**selected_bg_model:** `typing.Optional[typing.Literal["real_esrgan_x2"]]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.embed(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.embed( - texts=["texts"], -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**texts:** `typing.Sequence[str]` - -
-
- -
-
- -**example_id:** `typing.Optional[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**selected_model:** `typing.Optional[EmbeddingsPageRequestSelectedModel]` - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.seo_people_also_ask_doc(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.seo_people_also_ask_doc( - search_query="search_query", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**search_query:** `str` - -
-
- -
-
- -**example_id:** `typing.Optional[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**keyword_query:** `typing.Optional[RelatedQnADocPageRequestKeywordQuery]` - -
-
- -
-
- -**documents:** `typing.Optional[typing.Sequence[str]]` - -
-
- -
-
- -**max_references:** `typing.Optional[int]` - -
-
- -
-
- -**max_context_words:** `typing.Optional[int]` - -
-
- -
-
- -**scroll_jump:** `typing.Optional[int]` - -
-
- -
-
- -**doc_extract_url:** `typing.Optional[str]` - -
-
- -
-
- -**embedding_model:** `typing.Optional[RelatedQnADocPageRequestEmbeddingModel]` - -
-
- -
-
- -**dense_weight:** `typing.Optional[float]` - - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - -
-
- -
-
- -**task_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**query_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[RelatedQnADocPageRequestSelectedModel]` - -
-
- -
-
- -**citation_style:** `typing.Optional[RelatedQnADocPageRequestCitationStyle]` - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**response_format_type:** `typing.Optional[RelatedQnADocPageRequestResponseFormatType]` - -
-
- -
-
- -**serp_search_location:** `typing.Optional[SerpSearchLocation]` - -
-
- -
-
- -**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead - -
-
- -
-
- -**serp_search_type:** `typing.Optional[SerpSearchType]` - -
-
- -
-
- -**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead - -
-
- -
-
- -**settings:** `typing.Optional[RunSettings]` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.health_status_get() -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.health_status_get() - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## CopilotIntegrations -
client.copilot_integrations.video_bots_stream_create(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.copilot_integrations.video_bots_stream_create( - integration_id="integration_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**integration_id:** `str` — Your Integration ID as shown in the Copilot Integrations tab - -
-
- -
-
- -**conversation_id:** `typing.Optional[str]` - -The gooey conversation ID. - -If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests. - -Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response. - -
-
- -
-
- -**user_id:** `typing.Optional[str]` - -Your app's custom user ID. - -If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user's id associated with that conversation. - -
-
- -
-
- -**user_message_id:** `typing.Optional[str]` - -Your app's custom message ID for the user message. - -If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation. - -
-
- -
-
- -**button_pressed:** `typing.Optional[ButtonPressed]` — The button that was pressed by the user. - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**input_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**input_audio:** `typing.Optional[str]` - -
-
- -
-
- -**input_images:** `typing.Optional[typing.Sequence[str]]` - -
-
- -
-
- -**input_documents:** `typing.Optional[typing.Sequence[str]]` - -
-
- -
-
- -**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images. - -
-
- -
-
- -**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]` - -
-
- -
-
- -**bot_script:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[CreateStreamRequestSelectedModel]` - -
-
- -
-
- -**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) - -
-
- -
-
- -**task_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**query_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**keyword_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**documents:** `typing.Optional[typing.Sequence[str]]` - -
-
- -
-
- -**max_references:** `typing.Optional[int]` - -
-
- -
-
- -**max_context_words:** `typing.Optional[int]` - -
-
- -
-
- -**scroll_jump:** `typing.Optional[int]` - -
-
- -
-
- -**embedding_model:** `typing.Optional[CreateStreamRequestEmbeddingModel]` - -
-
- -
-
- -**dense_weight:** `typing.Optional[float]` - - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - -
-
- -
-
- -**citation_style:** `typing.Optional[CreateStreamRequestCitationStyle]` - -
-
- -
-
- -**use_url_shortener:** `typing.Optional[bool]` - -
-
- -
-
- -**asr_model:** `typing.Optional[CreateStreamRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text. - -
-
- -
-
- -**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text. - -
-
- -
-
- -**translation_model:** `typing.Optional[CreateStreamRequestTranslationModel]` - -
-
- -
-
- -**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. - -
-
- -
-
- -**input_glossary_document:** `typing.Optional[str]` - - -Translation Glossary for User Langauge -> LLM Language (English) - - -
-
- -
-
- -**output_glossary_document:** `typing.Optional[str]` - - -Translation Glossary for LLM Language (English) -> User Langauge - - -
-
- -
-
- -**lipsync_model:** `typing.Optional[CreateStreamRequestLipsyncModel]` - -
-
- -
-
- -**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**response_format_type:** `typing.Optional[CreateStreamRequestResponseFormatType]` - -
-
- -
-
- -**tts_provider:** `typing.Optional[CreateStreamRequestTtsProvider]` - -
-
- -
-
- -**uberduck_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**uberduck_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**google_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_pitch:** `typing.Optional[float]` - -
-
- -
-
- -**bark_history_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead - -
-
- -
-
- -**elevenlabs_api_key:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_id:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_model:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_stability:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_similarity_boost:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_style:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_speaker_boost:** `typing.Optional[bool]` - -
-
- -
-
- -**azure_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**openai_voice_name:** `typing.Optional[CreateStreamRequestOpenaiVoiceName]` - -
-
- -
-
- -**openai_tts_model:** `typing.Optional[CreateStreamRequestOpenaiTtsModel]` - -
-
- -
-
- -**input_face:** `typing.Optional[str]` - -
-
- -
-
- -**face_padding_top:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_bottom:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_left:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_right:** `typing.Optional[int]` - -
-
- -
-
- -**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` - -
-
- -
-
- -**input_text:** `typing.Optional[str]` — Use `input_prompt` instead - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -
client.copilot_integrations.video_bots_stream(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.copilot_integrations.video_bots_stream( - request_id="request_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**request_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## CopilotForYourEnterprise -
client.copilot_for_your_enterprise.async_video_bots(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.copilot_for_your_enterprise.async_video_bots() - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**example_id:** `typing.Optional[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments - -
-
- -
-
- -**input_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**input_audio:** `typing.Optional[str]` - -
-
- -
-
- -**input_images:** `typing.Optional[typing.Sequence[str]]` - -
-
- -
-
- -**input_documents:** `typing.Optional[typing.Sequence[str]]` - -
-
- -
-
- -**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images. - -
-
- -
-
- -**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]` - -
-
- -
-
- -**bot_script:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[VideoBotsPageRequestSelectedModel]` - -
-
- -
-
- -**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) - -
-
- -
-
- -**task_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**query_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**keyword_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**documents:** `typing.Optional[typing.Sequence[str]]` - -
-
- -
-
- -**max_references:** `typing.Optional[int]` - -
-
- -
-
- -**max_context_words:** `typing.Optional[int]` - -
-
- -
-
- -**scroll_jump:** `typing.Optional[int]` - -
-
- -
-
- -**embedding_model:** `typing.Optional[VideoBotsPageRequestEmbeddingModel]` - -
-
- -
-
- -**dense_weight:** `typing.Optional[float]` - - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - -
-
- -
-
- -**citation_style:** `typing.Optional[VideoBotsPageRequestCitationStyle]` - -
-
- -
-
- -**use_url_shortener:** `typing.Optional[bool]` - -
-
- -
-
- -**asr_model:** `typing.Optional[VideoBotsPageRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text. - -
-
- -
-
- -**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text. - -
-
- -
-
- -**translation_model:** `typing.Optional[VideoBotsPageRequestTranslationModel]` - -
-
- -
-
- -**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. - -
-
- -
-
- -**input_glossary_document:** `typing.Optional[str]` - - -Translation Glossary for User Langauge -> LLM Language (English) - - -
-
- -
-
- -**output_glossary_document:** `typing.Optional[str]` - - -Translation Glossary for LLM Language (English) -> User Langauge - - -
-
- -
-
- -**lipsync_model:** `typing.Optional[VideoBotsPageRequestLipsyncModel]` - -
-
- -
-
- -**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**response_format_type:** `typing.Optional[VideoBotsPageRequestResponseFormatType]` - -
-
- -
-
- -**tts_provider:** `typing.Optional[VideoBotsPageRequestTtsProvider]` - -
-
- -
-
- -**uberduck_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**uberduck_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**google_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_pitch:** `typing.Optional[float]` - -
-
- -
-
- -**bark_history_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead - -
-
- -
-
- -**elevenlabs_api_key:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_id:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_model:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_stability:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_similarity_boost:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_style:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_speaker_boost:** `typing.Optional[bool]` - -
-
- -
-
- -**azure_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**openai_voice_name:** `typing.Optional[VideoBotsPageRequestOpenaiVoiceName]` - -
-
- -
-
- -**openai_tts_model:** `typing.Optional[VideoBotsPageRequestOpenaiTtsModel]` - -
-
- -
-
- -**input_face:** `typing.Optional[str]` - -
-
- -
-
- -**face_padding_top:** `typing.Optional[int]` - -
-
+**user_id:** `typing.Optional[str]` -
-
+Your app's custom user ID. -**face_padding_bottom:** `typing.Optional[int]` +If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user's id associated with that conversation.
@@ -6286,15 +1634,11 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**face_padding_left:** `typing.Optional[int]` - -
-
+**user_message_id:** `typing.Optional[str]` -
-
+Your app's custom message ID for the user message. -**face_padding_right:** `typing.Optional[int]` +If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation.
@@ -6302,7 +1646,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` +**button_pressed:** `typing.Optional[ButtonPressed]` — The button that was pressed by the user.
@@ -6310,7 +1654,7 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**settings:** `typing.Optional[RunSettings]` +**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -6318,62 +1662,39 @@ Translation Glossary for LLM Language (English) -> User Langauge
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
-
-
+
+
+**input_prompt:** `typing.Optional[str]` +
-
-## Evaluator -
client.evaluator.async_bulk_eval(...)
-#### 🔌 Usage - -
-
+**input_audio:** `typing.Optional[str]` + +
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.evaluator.async_bulk_eval( - documents=["documents"], -) - -``` -
-
+**input_images:** `typing.Optional[typing.Sequence[str]]` +
-#### ⚙️ Parameters -
-
-
- -**documents:** `typing.Sequence[str]` - - -Upload or link to a CSV or google sheet that contains your sample input data. -For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. -Remember to includes header names in your CSV too. - +**input_documents:** `typing.Optional[typing.Sequence[str]]`
@@ -6381,7 +1702,7 @@ Remember to includes header names in your CSV too.
-**example_id:** `typing.Optional[str]` +**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images.
@@ -6389,7 +1710,7 @@ Remember to includes header names in your CSV too.
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]`
@@ -6397,7 +1718,7 @@ Remember to includes header names in your CSV too.
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**bot_script:** `typing.Optional[str]`
@@ -6405,12 +1726,7 @@ Remember to includes header names in your CSV too.
-**eval_prompts:** `typing.Optional[typing.Sequence[EvalPrompt]]` - - -Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. -_The `columns` dictionary can be used to reference the spreadsheet columns._ - +**selected_model:** `typing.Optional[CreateStreamRequestSelectedModel]`
@@ -6418,11 +1734,7 @@ _The `columns` dictionary can be used to reference the spreadsheet columns._
-**agg_functions:** `typing.Optional[typing.Sequence[AggFunction]]` - - -Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). - +**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
@@ -6430,7 +1742,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**selected_model:** `typing.Optional[BulkEvalPageRequestSelectedModel]` +**task_instructions:** `typing.Optional[str]`
@@ -6438,7 +1750,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**avoid_repetition:** `typing.Optional[bool]` +**query_instructions:** `typing.Optional[str]`
@@ -6446,7 +1758,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**num_outputs:** `typing.Optional[int]` +**keyword_instructions:** `typing.Optional[str]`
@@ -6454,7 +1766,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**quality:** `typing.Optional[float]` +**documents:** `typing.Optional[typing.Sequence[str]]`
@@ -6462,7 +1774,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**max_tokens:** `typing.Optional[int]` +**max_references:** `typing.Optional[int]`
@@ -6470,7 +1782,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**sampling_temperature:** `typing.Optional[float]` +**max_context_words:** `typing.Optional[int]`
@@ -6478,7 +1790,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**response_format_type:** `typing.Optional[BulkEvalPageRequestResponseFormatType]` +**scroll_jump:** `typing.Optional[int]`
@@ -6486,7 +1798,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**settings:** `typing.Optional[RunSettings]` +**embedding_model:** `typing.Optional[CreateStreamRequestEmbeddingModel]`
@@ -6494,56 +1806,36 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
+**dense_weight:** `typing.Optional[float]` +Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + -
-## SmartGpt -
client.smart_gpt.async_smart_gpt(...)
-#### 🔌 Usage - -
-
+**citation_style:** `typing.Optional[CreateStreamRequestCitationStyle]` + +
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.smart_gpt.async_smart_gpt( - input_prompt="input_prompt", -) - -``` -
-
+**use_url_shortener:** `typing.Optional[bool]` +
-#### ⚙️ Parameters - -
-
-
-**input_prompt:** `str` +**asr_model:** `typing.Optional[CreateStreamRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
@@ -6551,7 +1843,7 @@ client.smart_gpt.async_smart_gpt(
-**example_id:** `typing.Optional[str]` +**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text.
@@ -6559,7 +1851,7 @@ client.smart_gpt.async_smart_gpt(
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**translation_model:** `typing.Optional[CreateStreamRequestTranslationModel]`
@@ -6567,7 +1859,7 @@ client.smart_gpt.async_smart_gpt(
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
@@ -6575,7 +1867,11 @@ client.smart_gpt.async_smart_gpt(
-**cot_prompt:** `typing.Optional[str]` +**input_glossary_document:** `typing.Optional[str]` + + +Translation Glossary for User Language -> LLM Language (English) +
@@ -6583,7 +1879,11 @@ client.smart_gpt.async_smart_gpt(
-**reflexion_prompt:** `typing.Optional[str]` +**output_glossary_document:** `typing.Optional[str]` + + +Translation Glossary for LLM Language (English) -> User Language +
@@ -6591,7 +1891,7 @@ client.smart_gpt.async_smart_gpt(
-**dera_prompt:** `typing.Optional[str]` +**lipsync_model:** `typing.Optional[CreateStreamRequestLipsyncModel]`
@@ -6599,7 +1899,7 @@ client.smart_gpt.async_smart_gpt(
-**selected_model:** `typing.Optional[SmartGptPageRequestSelectedModel]` +**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
@@ -6647,7 +1947,7 @@ client.smart_gpt.async_smart_gpt(
-**response_format_type:** `typing.Optional[SmartGptPageRequestResponseFormatType]` +**response_format_type:** `typing.Optional[CreateStreamRequestResponseFormatType]`
@@ -6655,7 +1955,7 @@ client.smart_gpt.async_smart_gpt(
-**settings:** `typing.Optional[RunSettings]` +**tts_provider:** `typing.Optional[CreateStreamRequestTtsProvider]`
@@ -6663,54 +1963,39 @@ client.smart_gpt.async_smart_gpt(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**uberduck_voice_name:** `typing.Optional[str]`
-
-
+
+
+**uberduck_speaking_rate:** `typing.Optional[float]` +
-
-## Functions -
client.functions.async_functions(...)
-#### 🔌 Usage - -
-
+**google_voice_name:** `typing.Optional[str]` + +
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.functions.async_functions() - -``` -
-
+**google_speaking_rate:** `typing.Optional[float]` +
-#### ⚙️ Parameters -
-
-
- -**example_id:** `typing.Optional[str]` +**google_pitch:** `typing.Optional[float]`
@@ -6718,7 +2003,7 @@ client.functions.async_functions()
-**code:** `typing.Optional[str]` — The JS code to be executed. +**bark_history_prompt:** `typing.Optional[str]`
@@ -6726,7 +2011,7 @@ client.functions.async_functions()
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used in the code +**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
@@ -6734,7 +2019,7 @@ client.functions.async_functions()
-**settings:** `typing.Optional[RunSettings]` +**elevenlabs_api_key:** `typing.Optional[str]`
@@ -6742,54 +2027,55 @@ client.functions.async_functions()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**elevenlabs_voice_id:** `typing.Optional[str]`
-
-
+
+
+**elevenlabs_model:** `typing.Optional[str]` +
-
-## LipSyncing -
client.lip_syncing.async_lipsync(...)
-#### 🔌 Usage +**elevenlabs_stability:** `typing.Optional[float]` + +
+
+**elevenlabs_similarity_boost:** `typing.Optional[float]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.lip_syncing.async_lipsync() - -``` -
-
+**elevenlabs_style:** `typing.Optional[float]` + -#### ⚙️ Parameters -
+**elevenlabs_speaker_boost:** `typing.Optional[bool]` + +
+
+
-**example_id:** `typing.Optional[str]` +**azure_voice_name:** `typing.Optional[str]`
@@ -6797,7 +2083,7 @@ client.lip_syncing.async_lipsync()
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**openai_voice_name:** `typing.Optional[CreateStreamRequestOpenaiVoiceName]`
@@ -6805,7 +2091,7 @@ client.lip_syncing.async_lipsync()
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**openai_tts_model:** `typing.Optional[CreateStreamRequestOpenaiTtsModel]`
@@ -6861,7 +2147,7 @@ client.lip_syncing.async_lipsync()
-**selected_model:** `typing.Optional[LipsyncPageRequestSelectedModel]` +**input_text:** `typing.Optional[str]` — Use `input_prompt` instead
@@ -6869,15 +2155,55 @@ client.lip_syncing.async_lipsync()
-**input_audio:** `typing.Optional[str]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ + + + + + +
+ +
client.copilot_integrations.video_bots_stream(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.copilot_integrations.video_bots_stream( + request_id="request_id", +) + +``` +
+
+
+
+ +#### ⚙️ Parameters
-**settings:** `typing.Optional[RunSettings]` +
+
+ +**request_id:** `str`
diff --git a/src/gooey/__init__.py b/src/gooey/__init__.py index fd8646c..7ddc524 100644 --- a/src/gooey/__init__.py +++ b/src/gooey/__init__.py @@ -10,6 +10,9 @@ AsrOutputJson, AsrPageOutput, AsrPageOutputOutputTextItem, + AsrPageRequest, + AsrPageRequestFunctionsItem, + AsrPageRequestFunctionsItemTrigger, AsrPageRequestOutputFormat, AsrPageRequestSelectedModel, AsrPageRequestTranslationModel, @@ -18,8 +21,19 @@ BalanceResponse, BotBroadcastFilters, BulkEvalPageOutput, + BulkEvalPageRequest, + BulkEvalPageRequestAggFunctionsItem, + BulkEvalPageRequestAggFunctionsItemFunction, + BulkEvalPageRequestEvalPromptsItem, + BulkEvalPageRequestFunctionsItem, + BulkEvalPageRequestFunctionsItemTrigger, + BulkEvalPageRequestResponseFormatType, + BulkEvalPageRequestSelectedModel, BulkEvalPageStatusResponse, BulkRunnerPageOutput, + BulkRunnerPageRequest, + BulkRunnerPageRequestFunctionsItem, + BulkRunnerPageRequestFunctionsItemTrigger, BulkRunnerPageStatusResponse, ButtonPressed, CalledFunctionResponse, @@ -28,16 +42,27 @@ ChatCompletionContentPartTextParam, ChyronPlantPageOutput, ChyronPlantPageRequest, + ChyronPlantPageRequestFunctionsItem, + ChyronPlantPageRequestFunctionsItemTrigger, ChyronPlantPageStatusResponse, CompareLlmPageOutput, + CompareLlmPageRequest, + CompareLlmPageRequestFunctionsItem, + CompareLlmPageRequestFunctionsItemTrigger, CompareLlmPageRequestResponseFormatType, CompareLlmPageRequestSelectedModelsItem, CompareLlmPageStatusResponse, CompareText2ImgPageOutput, + CompareText2ImgPageRequest, + CompareText2ImgPageRequestFunctionsItem, + CompareText2ImgPageRequestFunctionsItemTrigger, CompareText2ImgPageRequestScheduler, CompareText2ImgPageRequestSelectedModelsItem, CompareText2ImgPageStatusResponse, CompareUpscalerPageOutput, + CompareUpscalerPageRequest, + CompareUpscalerPageRequestFunctionsItem, + CompareUpscalerPageRequestFunctionsItemTrigger, CompareUpscalerPageRequestSelectedModelsItem, CompareUpscalerPageStatusResponse, ConsoleLogs, @@ -51,79 +76,135 @@ 
ConversationStart, CreateStreamResponse, DeforumSdPageOutput, + DeforumSdPageRequest, + DeforumSdPageRequestAnimationPromptsItem, + DeforumSdPageRequestFunctionsItem, + DeforumSdPageRequestFunctionsItemTrigger, DeforumSdPageRequestSelectedModel, DeforumSdPageStatusResponse, DocExtractPageOutput, + DocExtractPageRequest, + DocExtractPageRequestFunctionsItem, + DocExtractPageRequestFunctionsItemTrigger, DocExtractPageRequestResponseFormatType, DocExtractPageRequestSelectedAsrModel, DocExtractPageRequestSelectedModel, DocExtractPageStatusResponse, DocSearchPageOutput, + DocSearchPageRequest, DocSearchPageRequestCitationStyle, DocSearchPageRequestEmbeddingModel, + DocSearchPageRequestFunctionsItem, + DocSearchPageRequestFunctionsItemTrigger, DocSearchPageRequestKeywordQuery, DocSearchPageRequestResponseFormatType, DocSearchPageRequestSelectedModel, DocSearchPageStatusResponse, DocSummaryPageOutput, + DocSummaryPageRequest, + DocSummaryPageRequestFunctionsItem, + DocSummaryPageRequestFunctionsItemTrigger, DocSummaryPageRequestResponseFormatType, DocSummaryPageRequestSelectedAsrModel, DocSummaryPageRequestSelectedModel, DocSummaryPageStatusResponse, EmailFaceInpaintingPageOutput, + EmailFaceInpaintingPageRequest, + EmailFaceInpaintingPageRequestFunctionsItem, + EmailFaceInpaintingPageRequestFunctionsItemTrigger, EmailFaceInpaintingPageRequestSelectedModel, EmailFaceInpaintingPageStatusResponse, EmbeddingsPageOutput, + EmbeddingsPageRequest, + EmbeddingsPageRequestFunctionsItem, + EmbeddingsPageRequestFunctionsItemTrigger, EmbeddingsPageRequestSelectedModel, EmbeddingsPageStatusResponse, EvalPrompt, FaceInpaintingPageOutput, + FaceInpaintingPageRequest, + FaceInpaintingPageRequestFunctionsItem, + FaceInpaintingPageRequestFunctionsItemTrigger, FaceInpaintingPageRequestSelectedModel, FaceInpaintingPageStatusResponse, FinalResponse, FunctionsPageOutput, + FunctionsPageRequest, FunctionsPageStatusResponse, GenericErrorResponse, GenericErrorResponseDetail, GoogleGptPageOutput, 
+ GoogleGptPageRequest, GoogleGptPageRequestEmbeddingModel, + GoogleGptPageRequestFunctionsItem, + GoogleGptPageRequestFunctionsItemTrigger, GoogleGptPageRequestResponseFormatType, GoogleGptPageRequestSelectedModel, GoogleGptPageStatusResponse, GoogleImageGenPageOutput, + GoogleImageGenPageRequest, + GoogleImageGenPageRequestFunctionsItem, + GoogleImageGenPageRequestFunctionsItemTrigger, GoogleImageGenPageRequestSelectedModel, GoogleImageGenPageStatusResponse, HttpValidationError, ImageSegmentationPageOutput, + ImageSegmentationPageRequest, + ImageSegmentationPageRequestFunctionsItem, + ImageSegmentationPageRequestFunctionsItemTrigger, ImageSegmentationPageRequestSelectedModel, ImageSegmentationPageStatusResponse, ImageUrl, ImageUrlDetail, Img2ImgPageOutput, + Img2ImgPageRequest, + Img2ImgPageRequestFunctionsItem, + Img2ImgPageRequestFunctionsItemTrigger, Img2ImgPageRequestSelectedControlnetModel, Img2ImgPageRequestSelectedControlnetModelItem, Img2ImgPageRequestSelectedModel, Img2ImgPageStatusResponse, LetterWriterPageOutput, LetterWriterPageRequest, + LetterWriterPageRequestExampleLettersItem, + LetterWriterPageRequestFunctionsItem, + LetterWriterPageRequestFunctionsItemTrigger, LetterWriterPageStatusResponse, LipsyncPageOutput, + LipsyncPageRequest, + LipsyncPageRequestFunctionsItem, + LipsyncPageRequestFunctionsItemTrigger, + LipsyncPageRequestSadtalkerSettings, + LipsyncPageRequestSadtalkerSettingsPreprocess, + LipsyncPageRequestSelectedModel, LipsyncPageStatusResponse, LipsyncTtsPageOutput, + LipsyncTtsPageRequest, + LipsyncTtsPageRequestFunctionsItem, + LipsyncTtsPageRequestFunctionsItemTrigger, LipsyncTtsPageRequestOpenaiTtsModel, LipsyncTtsPageRequestOpenaiVoiceName, + LipsyncTtsPageRequestSadtalkerSettings, + LipsyncTtsPageRequestSadtalkerSettingsPreprocess, LipsyncTtsPageRequestSelectedModel, LipsyncTtsPageRequestTtsProvider, LipsyncTtsPageStatusResponse, LlmTools, MessagePart, ObjectInpaintingPageOutput, + ObjectInpaintingPageRequest, + 
ObjectInpaintingPageRequestFunctionsItem, + ObjectInpaintingPageRequestFunctionsItemTrigger, ObjectInpaintingPageRequestSelectedModel, ObjectInpaintingPageStatusResponse, PromptTreeNode, PromptTreeNodePrompt, QrCodeGeneratorPageOutput, + QrCodeGeneratorPageRequest, + QrCodeGeneratorPageRequestFunctionsItem, + QrCodeGeneratorPageRequestFunctionsItemTrigger, QrCodeGeneratorPageRequestImagePromptControlnetModelsItem, + QrCodeGeneratorPageRequestQrCodeVcard, QrCodeGeneratorPageRequestScheduler, QrCodeGeneratorPageRequestSelectedControlnetModelItem, QrCodeGeneratorPageRequestSelectedModel, @@ -134,14 +215,20 @@ RelatedDocSearchResponse, RelatedGoogleGptResponse, RelatedQnADocPageOutput, + RelatedQnADocPageRequest, RelatedQnADocPageRequestCitationStyle, RelatedQnADocPageRequestEmbeddingModel, + RelatedQnADocPageRequestFunctionsItem, + RelatedQnADocPageRequestFunctionsItemTrigger, RelatedQnADocPageRequestKeywordQuery, RelatedQnADocPageRequestResponseFormatType, RelatedQnADocPageRequestSelectedModel, RelatedQnADocPageStatusResponse, RelatedQnAPageOutput, + RelatedQnAPageRequest, RelatedQnAPageRequestEmbeddingModel, + RelatedQnAPageRequestFunctionsItem, + RelatedQnAPageRequestFunctionsItemTrigger, RelatedQnAPageRequestResponseFormatType, RelatedQnAPageRequestSelectedModel, RelatedQnAPageStatusResponse, @@ -156,27 +243,45 @@ SadTalkerSettingsPreprocess, SearchReference, SeoSummaryPageOutput, + SeoSummaryPageRequest, SeoSummaryPageRequestResponseFormatType, SeoSummaryPageRequestSelectedModel, SeoSummaryPageStatusResponse, SerpSearchLocation, SerpSearchType, SmartGptPageOutput, + SmartGptPageRequest, + SmartGptPageRequestFunctionsItem, + SmartGptPageRequestFunctionsItemTrigger, + SmartGptPageRequestResponseFormatType, + SmartGptPageRequestSelectedModel, SmartGptPageStatusResponse, SocialLookupEmailPageOutput, + SocialLookupEmailPageRequest, + SocialLookupEmailPageRequestFunctionsItem, + SocialLookupEmailPageRequestFunctionsItemTrigger, 
SocialLookupEmailPageRequestResponseFormatType, SocialLookupEmailPageRequestSelectedModel, SocialLookupEmailPageStatusResponse, StreamError, Text2AudioPageOutput, + Text2AudioPageRequest, + Text2AudioPageRequestFunctionsItem, + Text2AudioPageRequestFunctionsItemTrigger, Text2AudioPageStatusResponse, TextToSpeechPageOutput, + TextToSpeechPageRequest, + TextToSpeechPageRequestFunctionsItem, + TextToSpeechPageRequestFunctionsItemTrigger, TextToSpeechPageRequestOpenaiTtsModel, TextToSpeechPageRequestOpenaiVoiceName, TextToSpeechPageRequestTtsProvider, TextToSpeechPageStatusResponse, TrainingDataModel, TranslationPageOutput, + TranslationPageRequest, + TranslationPageRequestFunctionsItem, + TranslationPageRequestFunctionsItemTrigger, TranslationPageRequestSelectedModel, TranslationPageStatusResponse, ValidationError, @@ -185,23 +290,32 @@ VideoBotsPageOutput, VideoBotsPageOutputFinalKeywordQuery, VideoBotsPageOutputFinalPrompt, - VideoBotsPageStatusResponse, -) -from .errors import PaymentRequiredError, TooManyRequestsError, UnprocessableEntityError -from . 
import copilot_for_your_enterprise, copilot_integrations, evaluator, functions, lip_syncing, misc, smart_gpt -from .client import AsyncGooey, Gooey -from .copilot_for_your_enterprise import ( + VideoBotsPageRequest, VideoBotsPageRequestAsrModel, VideoBotsPageRequestCitationStyle, VideoBotsPageRequestEmbeddingModel, + VideoBotsPageRequestFunctionsItem, + VideoBotsPageRequestFunctionsItemTrigger, VideoBotsPageRequestLipsyncModel, + VideoBotsPageRequestMessagesItem, + VideoBotsPageRequestMessagesItemContent, + VideoBotsPageRequestMessagesItemContentItem, + VideoBotsPageRequestMessagesItemContentItem_ImageUrl, + VideoBotsPageRequestMessagesItemContentItem_Text, + VideoBotsPageRequestMessagesItemRole, VideoBotsPageRequestOpenaiTtsModel, VideoBotsPageRequestOpenaiVoiceName, VideoBotsPageRequestResponseFormatType, + VideoBotsPageRequestSadtalkerSettings, + VideoBotsPageRequestSadtalkerSettingsPreprocess, VideoBotsPageRequestSelectedModel, VideoBotsPageRequestTranslationModel, VideoBotsPageRequestTtsProvider, + VideoBotsPageStatusResponse, ) +from .errors import PaymentRequiredError, UnprocessableEntityError +from . 
import copilot_integrations, misc +from .client import AsyncGooey, Gooey from .copilot_integrations import ( CreateStreamRequestAsrModel, CreateStreamRequestCitationStyle, @@ -216,9 +330,6 @@ VideoBotsStreamResponse, ) from .environment import GooeyEnvironment -from .evaluator import BulkEvalPageRequestResponseFormatType, BulkEvalPageRequestSelectedModel -from .lip_syncing import LipsyncPageRequestSelectedModel -from .smart_gpt import SmartGptPageRequestResponseFormatType, SmartGptPageRequestSelectedModel from .version import __version__ __all__ = [ @@ -231,6 +342,9 @@ "AsrOutputJson", "AsrPageOutput", "AsrPageOutputOutputTextItem", + "AsrPageRequest", + "AsrPageRequestFunctionsItem", + "AsrPageRequestFunctionsItemTrigger", "AsrPageRequestOutputFormat", "AsrPageRequestSelectedModel", "AsrPageRequestTranslationModel", @@ -240,10 +354,19 @@ "BalanceResponse", "BotBroadcastFilters", "BulkEvalPageOutput", + "BulkEvalPageRequest", + "BulkEvalPageRequestAggFunctionsItem", + "BulkEvalPageRequestAggFunctionsItemFunction", + "BulkEvalPageRequestEvalPromptsItem", + "BulkEvalPageRequestFunctionsItem", + "BulkEvalPageRequestFunctionsItemTrigger", "BulkEvalPageRequestResponseFormatType", "BulkEvalPageRequestSelectedModel", "BulkEvalPageStatusResponse", "BulkRunnerPageOutput", + "BulkRunnerPageRequest", + "BulkRunnerPageRequestFunctionsItem", + "BulkRunnerPageRequestFunctionsItemTrigger", "BulkRunnerPageStatusResponse", "ButtonPressed", "CalledFunctionResponse", @@ -252,16 +375,27 @@ "ChatCompletionContentPartTextParam", "ChyronPlantPageOutput", "ChyronPlantPageRequest", + "ChyronPlantPageRequestFunctionsItem", + "ChyronPlantPageRequestFunctionsItemTrigger", "ChyronPlantPageStatusResponse", "CompareLlmPageOutput", + "CompareLlmPageRequest", + "CompareLlmPageRequestFunctionsItem", + "CompareLlmPageRequestFunctionsItemTrigger", "CompareLlmPageRequestResponseFormatType", "CompareLlmPageRequestSelectedModelsItem", "CompareLlmPageStatusResponse", "CompareText2ImgPageOutput", + 
"CompareText2ImgPageRequest", + "CompareText2ImgPageRequestFunctionsItem", + "CompareText2ImgPageRequestFunctionsItemTrigger", "CompareText2ImgPageRequestScheduler", "CompareText2ImgPageRequestSelectedModelsItem", "CompareText2ImgPageStatusResponse", "CompareUpscalerPageOutput", + "CompareUpscalerPageRequest", + "CompareUpscalerPageRequestFunctionsItem", + "CompareUpscalerPageRequestFunctionsItemTrigger", "CompareUpscalerPageRequestSelectedModelsItem", "CompareUpscalerPageStatusResponse", "ConsoleLogs", @@ -285,83 +419,138 @@ "CreateStreamRequestTtsProvider", "CreateStreamResponse", "DeforumSdPageOutput", + "DeforumSdPageRequest", + "DeforumSdPageRequestAnimationPromptsItem", + "DeforumSdPageRequestFunctionsItem", + "DeforumSdPageRequestFunctionsItemTrigger", "DeforumSdPageRequestSelectedModel", "DeforumSdPageStatusResponse", "DocExtractPageOutput", + "DocExtractPageRequest", + "DocExtractPageRequestFunctionsItem", + "DocExtractPageRequestFunctionsItemTrigger", "DocExtractPageRequestResponseFormatType", "DocExtractPageRequestSelectedAsrModel", "DocExtractPageRequestSelectedModel", "DocExtractPageStatusResponse", "DocSearchPageOutput", + "DocSearchPageRequest", "DocSearchPageRequestCitationStyle", "DocSearchPageRequestEmbeddingModel", + "DocSearchPageRequestFunctionsItem", + "DocSearchPageRequestFunctionsItemTrigger", "DocSearchPageRequestKeywordQuery", "DocSearchPageRequestResponseFormatType", "DocSearchPageRequestSelectedModel", "DocSearchPageStatusResponse", "DocSummaryPageOutput", + "DocSummaryPageRequest", + "DocSummaryPageRequestFunctionsItem", + "DocSummaryPageRequestFunctionsItemTrigger", "DocSummaryPageRequestResponseFormatType", "DocSummaryPageRequestSelectedAsrModel", "DocSummaryPageRequestSelectedModel", "DocSummaryPageStatusResponse", "EmailFaceInpaintingPageOutput", + "EmailFaceInpaintingPageRequest", + "EmailFaceInpaintingPageRequestFunctionsItem", + "EmailFaceInpaintingPageRequestFunctionsItemTrigger", "EmailFaceInpaintingPageRequestSelectedModel", 
"EmailFaceInpaintingPageStatusResponse", "EmbeddingsPageOutput", + "EmbeddingsPageRequest", + "EmbeddingsPageRequestFunctionsItem", + "EmbeddingsPageRequestFunctionsItemTrigger", "EmbeddingsPageRequestSelectedModel", "EmbeddingsPageStatusResponse", "EvalPrompt", "FaceInpaintingPageOutput", + "FaceInpaintingPageRequest", + "FaceInpaintingPageRequestFunctionsItem", + "FaceInpaintingPageRequestFunctionsItemTrigger", "FaceInpaintingPageRequestSelectedModel", "FaceInpaintingPageStatusResponse", "FinalResponse", "FunctionsPageOutput", + "FunctionsPageRequest", "FunctionsPageStatusResponse", "GenericErrorResponse", "GenericErrorResponseDetail", "Gooey", "GooeyEnvironment", "GoogleGptPageOutput", + "GoogleGptPageRequest", "GoogleGptPageRequestEmbeddingModel", + "GoogleGptPageRequestFunctionsItem", + "GoogleGptPageRequestFunctionsItemTrigger", "GoogleGptPageRequestResponseFormatType", "GoogleGptPageRequestSelectedModel", "GoogleGptPageStatusResponse", "GoogleImageGenPageOutput", + "GoogleImageGenPageRequest", + "GoogleImageGenPageRequestFunctionsItem", + "GoogleImageGenPageRequestFunctionsItemTrigger", "GoogleImageGenPageRequestSelectedModel", "GoogleImageGenPageStatusResponse", "HttpValidationError", "ImageSegmentationPageOutput", + "ImageSegmentationPageRequest", + "ImageSegmentationPageRequestFunctionsItem", + "ImageSegmentationPageRequestFunctionsItemTrigger", "ImageSegmentationPageRequestSelectedModel", "ImageSegmentationPageStatusResponse", "ImageUrl", "ImageUrlDetail", "Img2ImgPageOutput", + "Img2ImgPageRequest", + "Img2ImgPageRequestFunctionsItem", + "Img2ImgPageRequestFunctionsItemTrigger", "Img2ImgPageRequestSelectedControlnetModel", "Img2ImgPageRequestSelectedControlnetModelItem", "Img2ImgPageRequestSelectedModel", "Img2ImgPageStatusResponse", "LetterWriterPageOutput", "LetterWriterPageRequest", + "LetterWriterPageRequestExampleLettersItem", + "LetterWriterPageRequestFunctionsItem", + "LetterWriterPageRequestFunctionsItemTrigger", 
"LetterWriterPageStatusResponse", "LipsyncPageOutput", + "LipsyncPageRequest", + "LipsyncPageRequestFunctionsItem", + "LipsyncPageRequestFunctionsItemTrigger", + "LipsyncPageRequestSadtalkerSettings", + "LipsyncPageRequestSadtalkerSettingsPreprocess", "LipsyncPageRequestSelectedModel", "LipsyncPageStatusResponse", "LipsyncTtsPageOutput", + "LipsyncTtsPageRequest", + "LipsyncTtsPageRequestFunctionsItem", + "LipsyncTtsPageRequestFunctionsItemTrigger", "LipsyncTtsPageRequestOpenaiTtsModel", "LipsyncTtsPageRequestOpenaiVoiceName", + "LipsyncTtsPageRequestSadtalkerSettings", + "LipsyncTtsPageRequestSadtalkerSettingsPreprocess", "LipsyncTtsPageRequestSelectedModel", "LipsyncTtsPageRequestTtsProvider", "LipsyncTtsPageStatusResponse", "LlmTools", "MessagePart", "ObjectInpaintingPageOutput", + "ObjectInpaintingPageRequest", + "ObjectInpaintingPageRequestFunctionsItem", + "ObjectInpaintingPageRequestFunctionsItemTrigger", "ObjectInpaintingPageRequestSelectedModel", "ObjectInpaintingPageStatusResponse", "PaymentRequiredError", "PromptTreeNode", "PromptTreeNodePrompt", "QrCodeGeneratorPageOutput", + "QrCodeGeneratorPageRequest", + "QrCodeGeneratorPageRequestFunctionsItem", + "QrCodeGeneratorPageRequestFunctionsItemTrigger", "QrCodeGeneratorPageRequestImagePromptControlnetModelsItem", + "QrCodeGeneratorPageRequestQrCodeVcard", "QrCodeGeneratorPageRequestScheduler", "QrCodeGeneratorPageRequestSelectedControlnetModelItem", "QrCodeGeneratorPageRequestSelectedModel", @@ -372,14 +561,20 @@ "RelatedDocSearchResponse", "RelatedGoogleGptResponse", "RelatedQnADocPageOutput", + "RelatedQnADocPageRequest", "RelatedQnADocPageRequestCitationStyle", "RelatedQnADocPageRequestEmbeddingModel", + "RelatedQnADocPageRequestFunctionsItem", + "RelatedQnADocPageRequestFunctionsItemTrigger", "RelatedQnADocPageRequestKeywordQuery", "RelatedQnADocPageRequestResponseFormatType", "RelatedQnADocPageRequestSelectedModel", "RelatedQnADocPageStatusResponse", "RelatedQnAPageOutput", + "RelatedQnAPageRequest", 
"RelatedQnAPageRequestEmbeddingModel", + "RelatedQnAPageRequestFunctionsItem", + "RelatedQnAPageRequestFunctionsItemTrigger", "RelatedQnAPageRequestResponseFormatType", "RelatedQnAPageRequestSelectedModel", "RelatedQnAPageStatusResponse", @@ -394,30 +589,45 @@ "SadTalkerSettingsPreprocess", "SearchReference", "SeoSummaryPageOutput", + "SeoSummaryPageRequest", "SeoSummaryPageRequestResponseFormatType", "SeoSummaryPageRequestSelectedModel", "SeoSummaryPageStatusResponse", "SerpSearchLocation", "SerpSearchType", "SmartGptPageOutput", + "SmartGptPageRequest", + "SmartGptPageRequestFunctionsItem", + "SmartGptPageRequestFunctionsItemTrigger", "SmartGptPageRequestResponseFormatType", "SmartGptPageRequestSelectedModel", "SmartGptPageStatusResponse", "SocialLookupEmailPageOutput", + "SocialLookupEmailPageRequest", + "SocialLookupEmailPageRequestFunctionsItem", + "SocialLookupEmailPageRequestFunctionsItemTrigger", "SocialLookupEmailPageRequestResponseFormatType", "SocialLookupEmailPageRequestSelectedModel", "SocialLookupEmailPageStatusResponse", "StreamError", "Text2AudioPageOutput", + "Text2AudioPageRequest", + "Text2AudioPageRequestFunctionsItem", + "Text2AudioPageRequestFunctionsItemTrigger", "Text2AudioPageStatusResponse", "TextToSpeechPageOutput", + "TextToSpeechPageRequest", + "TextToSpeechPageRequestFunctionsItem", + "TextToSpeechPageRequestFunctionsItemTrigger", "TextToSpeechPageRequestOpenaiTtsModel", "TextToSpeechPageRequestOpenaiVoiceName", "TextToSpeechPageRequestTtsProvider", "TextToSpeechPageStatusResponse", - "TooManyRequestsError", "TrainingDataModel", "TranslationPageOutput", + "TranslationPageRequest", + "TranslationPageRequestFunctionsItem", + "TranslationPageRequestFunctionsItemTrigger", "TranslationPageRequestSelectedModel", "TranslationPageStatusResponse", "UnprocessableEntityError", @@ -427,24 +637,30 @@ "VideoBotsPageOutput", "VideoBotsPageOutputFinalKeywordQuery", "VideoBotsPageOutputFinalPrompt", + "VideoBotsPageRequest", 
"VideoBotsPageRequestAsrModel", "VideoBotsPageRequestCitationStyle", "VideoBotsPageRequestEmbeddingModel", + "VideoBotsPageRequestFunctionsItem", + "VideoBotsPageRequestFunctionsItemTrigger", "VideoBotsPageRequestLipsyncModel", + "VideoBotsPageRequestMessagesItem", + "VideoBotsPageRequestMessagesItemContent", + "VideoBotsPageRequestMessagesItemContentItem", + "VideoBotsPageRequestMessagesItemContentItem_ImageUrl", + "VideoBotsPageRequestMessagesItemContentItem_Text", + "VideoBotsPageRequestMessagesItemRole", "VideoBotsPageRequestOpenaiTtsModel", "VideoBotsPageRequestOpenaiVoiceName", "VideoBotsPageRequestResponseFormatType", + "VideoBotsPageRequestSadtalkerSettings", + "VideoBotsPageRequestSadtalkerSettingsPreprocess", "VideoBotsPageRequestSelectedModel", "VideoBotsPageRequestTranslationModel", "VideoBotsPageRequestTtsProvider", "VideoBotsPageStatusResponse", "VideoBotsStreamResponse", "__version__", - "copilot_for_your_enterprise", "copilot_integrations", - "evaluator", - "functions", - "lip_syncing", "misc", - "smart_gpt", ] diff --git a/src/gooey/client.py b/src/gooey/client.py index acf6501..c6ceaa1 100644 --- a/src/gooey/client.py +++ b/src/gooey/client.py @@ -7,150 +7,47 @@ from .core.api_error import ApiError from .core.client_wrapper import SyncClientWrapper from .copilot_integrations.client import CopilotIntegrationsClient -from .copilot_for_your_enterprise.client import CopilotForYourEnterpriseClient -from .evaluator.client import EvaluatorClient -from .smart_gpt.client import SmartGptClient -from .functions.client import FunctionsClient -from .lip_syncing.client import LipSyncingClient from .misc.client import MiscClient -from .types.animation_prompt import AnimationPrompt -from .types.recipe_function import RecipeFunction -from .types.deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel -from .types.run_settings import RunSettings from .core.request_options import RequestOptions -from .types.deforum_sd_page_output import 
DeforumSdPageOutput -from .types.deforum_sd_page_status_response import DeforumSdPageStatusResponse +from .types.video_bots_page_status_response import VideoBotsPageStatusResponse from .core.pydantic_utilities import parse_obj_as -from .errors.payment_required_error import PaymentRequiredError -from .errors.unprocessable_entity_error import UnprocessableEntityError -from .types.http_validation_error import HttpValidationError -from .errors.too_many_requests_error import TooManyRequestsError -from .types.generic_error_response import GenericErrorResponse from json.decoder import JSONDecodeError -from .types.vcard import Vcard -from .types.qr_code_generator_page_request_image_prompt_controlnet_models_item import ( - QrCodeGeneratorPageRequestImagePromptControlnetModelsItem, -) -from .types.qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel -from .types.qr_code_generator_page_request_selected_controlnet_model_item import ( - QrCodeGeneratorPageRequestSelectedControlnetModelItem, -) -from .types.qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler -from .types.qr_code_generator_page_output import QrCodeGeneratorPageOutput +from .types.deforum_sd_page_status_response import DeforumSdPageStatusResponse from .types.qr_code_generator_page_status_response import QrCodeGeneratorPageStatusResponse -from .types.related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel -from .types.related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel -from .types.related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType -from .types.serp_search_location import SerpSearchLocation -from .types.serp_search_type import SerpSearchType -from .types.related_qn_a_page_output import RelatedQnAPageOutput from .types.related_qn_a_page_status_response import RelatedQnAPageStatusResponse -from .types.seo_summary_page_request_selected_model 
import SeoSummaryPageRequestSelectedModel -from .types.seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType -from .types.seo_summary_page_output import SeoSummaryPageOutput from .types.seo_summary_page_status_response import SeoSummaryPageStatusResponse -from .types.google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel -from .types.google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel -from .types.google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType -from .types.google_gpt_page_output import GoogleGptPageOutput from .types.google_gpt_page_status_response import GoogleGptPageStatusResponse -from .types.social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel -from .types.social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType -from .types.social_lookup_email_page_output import SocialLookupEmailPageOutput from .types.social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse -from .types.bulk_runner_page_output import BulkRunnerPageOutput from .types.bulk_runner_page_status_response import BulkRunnerPageStatusResponse -from .types.doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel -from .types.doc_extract_page_request_selected_model import DocExtractPageRequestSelectedModel -from .types.doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType -from .types.doc_extract_page_output import DocExtractPageOutput +from .types.bulk_eval_page_status_response import BulkEvalPageStatusResponse from .types.doc_extract_page_status_response import DocExtractPageStatusResponse -from .types.compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem -from .types.compare_llm_page_request_response_format_type import 
CompareLlmPageRequestResponseFormatType -from .types.compare_llm_page_output import CompareLlmPageOutput from .types.compare_llm_page_status_response import CompareLlmPageStatusResponse -from .types.doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery -from .types.doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel -from .types.doc_search_page_request_selected_model import DocSearchPageRequestSelectedModel -from .types.doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle -from .types.doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType -from .types.doc_search_page_output import DocSearchPageOutput from .types.doc_search_page_status_response import DocSearchPageStatusResponse -from .types.doc_summary_page_request_selected_model import DocSummaryPageRequestSelectedModel -from .types.doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel -from .types.doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType -from .types.doc_summary_page_output import DocSummaryPageOutput +from .types.smart_gpt_page_status_response import SmartGptPageStatusResponse from .types.doc_summary_page_status_response import DocSummaryPageStatusResponse -from .types.lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider -from .types.lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName -from .types.lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel -from .types.sad_talker_settings import SadTalkerSettings -from .types.lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel -from .types.lipsync_tts_page_output import LipsyncTtsPageOutput +from .types.functions_page_status_response import FunctionsPageStatusResponse +from .types.lipsync_page_status_response import LipsyncPageStatusResponse from 
.types.lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse -from .types.text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider -from .types.text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName -from .types.text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel -from .types.text_to_speech_page_output import TextToSpeechPageOutput from .types.text_to_speech_page_status_response import TextToSpeechPageStatusResponse -from .types.asr_page_request_selected_model import AsrPageRequestSelectedModel -from .types.asr_page_request_translation_model import AsrPageRequestTranslationModel -from .types.asr_page_request_output_format import AsrPageRequestOutputFormat -from .types.asr_page_output import AsrPageOutput from .types.asr_page_status_response import AsrPageStatusResponse -from .types.text2audio_page_output import Text2AudioPageOutput from .types.text2audio_page_status_response import Text2AudioPageStatusResponse -from .types.translation_page_request_selected_model import TranslationPageRequestSelectedModel -from .types.translation_page_output import TranslationPageOutput from .types.translation_page_status_response import TranslationPageStatusResponse -from .types.img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel -from .types.img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel -from .types.img2img_page_output import Img2ImgPageOutput from .types.img2img_page_status_response import Img2ImgPageStatusResponse -from .types.compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem -from .types.compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler -from .types.compare_text2img_page_output import CompareText2ImgPageOutput from .types.compare_text2img_page_status_response import CompareText2ImgPageStatusResponse -from 
.types.object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel -from .types.object_inpainting_page_output import ObjectInpaintingPageOutput from .types.object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse -from .types.face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel -from .types.face_inpainting_page_output import FaceInpaintingPageOutput from .types.face_inpainting_page_status_response import FaceInpaintingPageStatusResponse -from .types.email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel -from .types.email_face_inpainting_page_output import EmailFaceInpaintingPageOutput from .types.email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse -from .types.google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel -from .types.google_image_gen_page_output import GoogleImageGenPageOutput from .types.google_image_gen_page_status_response import GoogleImageGenPageStatusResponse -from .types.image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel -from .types.image_segmentation_page_output import ImageSegmentationPageOutput from .types.image_segmentation_page_status_response import ImageSegmentationPageStatusResponse -from .types.compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem -from .types.compare_upscaler_page_output import CompareUpscalerPageOutput from .types.compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse -from .types.embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel -from .types.embeddings_page_output import EmbeddingsPageOutput +from .types.chyron_plant_page_status_response import ChyronPlantPageStatusResponse +from .types.letter_writer_page_status_response import LetterWriterPageStatusResponse from 
.types.embeddings_page_status_response import EmbeddingsPageStatusResponse -from .types.related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery -from .types.related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel -from .types.related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel -from .types.related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle -from .types.related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType -from .types.related_qn_a_doc_page_output import RelatedQnADocPageOutput from .types.related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse from .core.client_wrapper import AsyncClientWrapper from .copilot_integrations.client import AsyncCopilotIntegrationsClient -from .copilot_for_your_enterprise.client import AsyncCopilotForYourEnterpriseClient -from .evaluator.client import AsyncEvaluatorClient -from .smart_gpt.client import AsyncSmartGptClient -from .functions.client import AsyncFunctionsClient -from .lip_syncing.client import AsyncLipSyncingClient from .misc.client import AsyncMiscClient -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- class Gooey: """ @@ -213,274 +110,104 @@ def __init__( timeout=_defaulted_timeout, ) self.copilot_integrations = CopilotIntegrationsClient(client_wrapper=self._client_wrapper) - self.copilot_for_your_enterprise = CopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper) - self.evaluator = EvaluatorClient(client_wrapper=self._client_wrapper) - self.smart_gpt = SmartGptClient(client_wrapper=self._client_wrapper) - self.functions = FunctionsClient(client_wrapper=self._client_wrapper) - self.lip_syncing = LipSyncingClient(client_wrapper=self._client_wrapper) self.misc = MiscClient(client_wrapper=self._client_wrapper) - def animate( - self, - *, - animation_prompts: typing.Sequence[AnimationPrompt], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - max_frames: typing.Optional[int] = OMIT, - selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT, - animation_mode: typing.Optional[str] = OMIT, - zoom: typing.Optional[str] = OMIT, - translation_x: typing.Optional[str] = OMIT, - translation_y: typing.Optional[str] = OMIT, - rotation3d_x: typing.Optional[str] = OMIT, - rotation3d_y: typing.Optional[str] = OMIT, - rotation3d_z: typing.Optional[str] = OMIT, - fps: typing.Optional[int] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[DeforumSdPageOutput]: + def post_v3video_bots_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> VideoBotsPageStatusResponse: """ Parameters ---------- - animation_prompts : typing.Sequence[AnimationPrompt] - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja 
prompt templates and in functions as arguments - - max_frames : typing.Optional[int] - - selected_model : typing.Optional[DeforumSdPageRequestSelectedModel] - - animation_mode : typing.Optional[str] - - zoom : typing.Optional[str] - - translation_x : typing.Optional[str] - - translation_y : typing.Optional[str] - - rotation3d_x : typing.Optional[str] - - rotation3d_y : typing.Optional[str] - - rotation3d_z : typing.Optional[str] + request_options : typing.Optional[RequestOptions] + Request-specific configuration. - fps : typing.Optional[int] + Returns + ------- + VideoBotsPageStatusResponse + Successful Response - seed : typing.Optional[int] + Examples + -------- + from gooey import Gooey - settings : typing.Optional[RunSettings] + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3video_bots_async() + """ + _response = self._client_wrapper.httpx_client.request( + "v3/video-bots/async", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + VideoBotsPageStatusResponse, + parse_obj_as( + type_=VideoBotsPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + def post_v3deforum_sd_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> DeforumSdPageStatusResponse: + """ + Parameters + ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[DeforumSdPageOutput] + DeforumSdPageStatusResponse Successful Response Examples -------- - from gooey import AnimationPrompt, Gooey + from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) - client.animate( - animation_prompts=[ - AnimationPrompt( - frame="frame", - prompt="prompt", - ) - ], - ) + client.post_v3deforum_sd_async() """ _response = self._client_wrapper.httpx_client.request( "v3/DeforumSD/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "animation_prompts": animation_prompts, - "max_frames": max_frames, - "selected_model": selected_model, - "animation_mode": animation_mode, - "zoom": zoom, - "translation_x": translation_x, - "translation_y": translation_y, - "rotation_3d_x": rotation3d_x, - "rotation_3d_y": rotation3d_y, - "rotation_3d_z": rotation3d_z, - "fps": fps, - "seed": seed, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( DeforumSdPageStatusResponse, parse_obj_as( type_=DeforumSdPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise 
ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def qr_code( - self, - *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - qr_code_data: typing.Optional[str] = OMIT, - qr_code_input_image: typing.Optional[str] = OMIT, - qr_code_vcard: typing.Optional[Vcard] = OMIT, - qr_code_file: typing.Optional[str] = OMIT, - use_url_shortener: typing.Optional[bool] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - image_prompt: typing.Optional[str] = OMIT, - image_prompt_controlnet_models: typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] - ] = OMIT, - image_prompt_strength: typing.Optional[float] = OMIT, - image_prompt_scale: typing.Optional[float] = OMIT, - image_prompt_pos_x: typing.Optional[float] = OMIT, - image_prompt_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT, - selected_controlnet_model: typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem] - ] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = OMIT, - seed: typing.Optional[int] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[QrCodeGeneratorPageOutput]: + def 
post_v3art_qr_code_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> QrCodeGeneratorPageStatusResponse: """ Parameters ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - qr_code_data : typing.Optional[str] - - qr_code_input_image : typing.Optional[str] - - qr_code_vcard : typing.Optional[Vcard] - - qr_code_file : typing.Optional[str] - - use_url_shortener : typing.Optional[bool] - - negative_prompt : typing.Optional[str] - - image_prompt : typing.Optional[str] - - image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]] - - image_prompt_strength : typing.Optional[float] - - image_prompt_scale : typing.Optional[float] - - image_prompt_pos_x : typing.Optional[float] - - image_prompt_pos_y : typing.Optional[float] - - selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel] - - selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - scheduler : typing.Optional[QrCodeGeneratorPageRequestScheduler] - - seed : typing.Optional[int] - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[QrCodeGeneratorPageOutput] + QrCodeGeneratorPageStatusResponse Successful Response Examples @@ -490,191 +217,39 @@ def qr_code( client = Gooey( api_key="YOUR_API_KEY", ) - client.qr_code( - text_prompt="text_prompt", - ) + client.post_v3art_qr_code_async() """ _response = self._client_wrapper.httpx_client.request( "v3/art-qr-code/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "qr_code_data": qr_code_data, - "qr_code_input_image": qr_code_input_image, - "qr_code_vcard": qr_code_vcard, - "qr_code_file": qr_code_file, - "use_url_shortener": use_url_shortener, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "image_prompt": image_prompt, - "image_prompt_controlnet_models": image_prompt_controlnet_models, - "image_prompt_strength": image_prompt_strength, - "image_prompt_scale": image_prompt_scale, - "image_prompt_pos_x": image_prompt_pos_x, - "image_prompt_pos_y": image_prompt_pos_y, - "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "num_outputs": num_outputs, - "quality": quality, - "scheduler": scheduler, - "seed": seed, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( QrCodeGeneratorPageStatusResponse, parse_obj_as( type_=QrCodeGeneratorPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - 
object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def seo_people_also_ask( - self, - *, - search_query: str, - site_filter: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = 
OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[RelatedQnAPageOutput]: + def post_v3related_qna_maker_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> RelatedQnAPageStatusResponse: """ Parameters ---------- - search_query : str - - site_filter : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
- - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[RelatedQnAPageOutput] + RelatedQnAPageStatusResponse Successful Response Examples @@ -684,173 +259,39 @@ def seo_people_also_ask( client = Gooey( api_key="YOUR_API_KEY", ) - client.seo_people_also_ask( - search_query="search_query", - site_filter="site_filter", - ) + client.post_v3related_qna_maker_async() """ _response = self._client_wrapper.httpx_client.request( "v3/related-qna-maker/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": 
scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( RelatedQnAPageStatusResponse, parse_obj_as( type_=RelatedQnAPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def seo_content( - self, - *, - search_query: str, - keywords: str, - title: str, - company_url: str, - example_id: typing.Optional[str] = None, - task_instructions: typing.Optional[str] = OMIT, - enable_html: typing.Optional[bool] = OMIT, - selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - enable_crosslinks: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: 
typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[SeoSummaryPageOutput]: + def post_v3seo_summary_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> SeoSummaryPageStatusResponse: """ Parameters ---------- - search_query : str - - keywords : str - - title : str - - company_url : str - - example_id : typing.Optional[str] - - task_instructions : typing.Optional[str] - - enable_html : typing.Optional[bool] - - selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel] - - max_search_urls : typing.Optional[int] - - enable_crosslinks : typing.Optional[bool] - - seed : typing.Optional[int] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[SeoSummaryPageOutput] + SeoSummaryPageStatusResponse Successful Response Examples @@ -860,186 +301,39 @@ def seo_content( client = Gooey( api_key="YOUR_API_KEY", ) - client.seo_content( - search_query="search_query", - keywords="keywords", - title="title", - company_url="company_url", - ) + client.post_v3seo_summary_async() """ _response = self._client_wrapper.httpx_client.request( "v3/SEOSummary/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "search_query": search_query, - "keywords": keywords, - "title": title, - "company_url": company_url, - "task_instructions": task_instructions, - "enable_html": enable_html, - "selected_model": selected_model, - "max_search_urls": max_search_urls, - "enable_crosslinks": enable_crosslinks, - "seed": seed, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( SeoSummaryPageStatusResponse, parse_obj_as( type_=SeoSummaryPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if 
_response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def web_search_llm( - self, - *, - search_query: str, - site_filter: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[GoogleGptPageOutput]: + def post_v3google_gpt_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> 
GoogleGptPageStatusResponse: """ Parameters ---------- - search_query : str - - site_filter : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[GoogleGptPageRequestSelectedModel] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[GoogleGptPageOutput] + GoogleGptPageStatusResponse Successful Response Examples @@ -1049,145 +343,39 @@ def web_search_llm( client = Gooey( api_key="YOUR_API_KEY", ) - client.web_search_llm( - search_query="search_query", - site_filter="site_filter", - ) + client.post_v3google_gpt_async() """ _response = self._client_wrapper.httpx_client.request( "v3/google-gpt/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( GoogleGptPageStatusResponse, parse_obj_as( type_=GoogleGptPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - 
parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def personalize_email( - self, - *, - email_address: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[SocialLookupEmailPageOutput]: + def post_v3social_lookup_email_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> SocialLookupEmailPageStatusResponse: """ Parameters ---------- - email_address : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel] - - avoid_repetition : 
typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[SocialLookupEmailPageOutput] + SocialLookupEmailPageStatusResponse Successful Response Examples @@ -1197,138 +385,39 @@ def personalize_email( client = Gooey( api_key="YOUR_API_KEY", ) - client.personalize_email( - email_address="email_address", - ) + client.post_v3social_lookup_email_async() """ _response = self._client_wrapper.httpx_client.request( "v3/SocialLookupEmail/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "email_address": email_address, - "input_prompt": input_prompt, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( SocialLookupEmailPageStatusResponse, parse_obj_as( type_=SocialLookupEmailPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - 
object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def bulk_run( - self, - *, - documents: typing.Sequence[str], - run_urls: typing.Sequence[str], - input_columns: typing.Dict[str, str], - output_columns: typing.Dict[str, str], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - eval_urls: typing.Optional[typing.Sequence[str]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[BulkRunnerPageOutput]: + def post_v3bulk_runner_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> BulkRunnerPageStatusResponse: """ Parameters ---------- - documents : typing.Sequence[str] - - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. - - - run_urls : typing.Sequence[str] - - Provide one or more Gooey.AI workflow runs. - You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. - - - input_columns : typing.Dict[str, str] - - For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. 
- - - output_columns : typing.Dict[str, str] - - For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. - - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - eval_urls : typing.Optional[typing.Sequence[str]] - - _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. - - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[BulkRunnerPageOutput] + BulkRunnerPageStatusResponse Successful Response Examples @@ -1338,145 +427,81 @@ def bulk_run( client = Gooey( api_key="YOUR_API_KEY", ) - client.bulk_run( - documents=["documents"], - run_urls=["run_urls"], - input_columns={"key": "value"}, - output_columns={"key": "value"}, - ) + client.post_v3bulk_runner_async() """ _response = self._client_wrapper.httpx_client.request( "v3/bulk-runner/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "run_urls": run_urls, - "input_columns": input_columns, - "output_columns": output_columns, - "eval_urls": eval_urls, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( BulkRunnerPageStatusResponse, parse_obj_as( type_=BulkRunnerPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) 
- if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def synthesize_data( - self, - *, - documents: typing.Sequence[str], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - sheet_url: typing.Optional[str] = OMIT, - selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[DocExtractPageOutput]: + def post_v3bulk_eval_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> BulkEvalPageStatusResponse: """ Parameters ---------- - documents : typing.Sequence[str] - - example_id : typing.Optional[str] - - functions : 
typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments + request_options : typing.Optional[RequestOptions] + Request-specific configuration. - sheet_url : typing.Optional[str] + Returns + ------- + BulkEvalPageStatusResponse + Successful Response - selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel] + Examples + -------- + from gooey import Gooey - google_translate_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - task_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocExtractPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[DocExtractPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3bulk_eval_async() + """ + _response = self._client_wrapper.httpx_client.request( + "v3/bulk-eval/async", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + BulkEvalPageStatusResponse, + parse_obj_as( + type_=BulkEvalPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + 
def post_v3doc_extract_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> DocExtractPageStatusResponse: + """ + Parameters + ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[DocExtractPageOutput] + DocExtractPageStatusResponse Successful Response Examples @@ -1486,133 +511,39 @@ def synthesize_data( client = Gooey( api_key="YOUR_API_KEY", ) - client.synthesize_data( - documents=["documents"], - ) + client.post_v3doc_extract_async() """ _response = self._client_wrapper.httpx_client.request( "v3/doc-extract/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "sheet_url": sheet_url, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "glossary_document": glossary_document, - "task_instructions": task_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( DocExtractPageStatusResponse, parse_obj_as( type_=DocExtractPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - 
if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def llm( - self, - *, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[CompareLlmPageOutput]: + def post_v3compare_llm_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> CompareLlmPageStatusResponse: """ Parameters ---------- - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : 
typing.Optional[float] - - response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[CompareLlmPageOutput] + CompareLlmPageStatusResponse Successful Response Examples @@ -1622,163 +553,39 @@ def llm( client = Gooey( api_key="YOUR_API_KEY", ) - client.llm() + client.post_v3compare_llm_async() """ _response = self._client_wrapper.httpx_client.request( "v3/CompareLLM/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "selected_models": selected_models, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( CompareLlmPageStatusResponse, parse_obj_as( type_=CompareLlmPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise 
ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def rag( - self, - *, - search_query: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT, - citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[DocSearchPageOutput]: + def post_v3doc_search_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> DocSearchPageStatusResponse: """ Parameters ---------- - search_query : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments 
- - keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocSearchPageRequestSelectedModel] - - citation_style : typing.Optional[DocSearchPageRequestCitationStyle] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[DocSearchPageOutput] + DocSearchPageStatusResponse Successful Response Examples @@ -1788,154 +595,39 @@ def rag( client = Gooey( api_key="YOUR_API_KEY", ) - client.rag( - search_query="search_query", - ) + client.post_v3doc_search_async() """ _response = self._client_wrapper.httpx_client.request( "v3/doc-search/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "citation_style": citation_style, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( DocSearchPageStatusResponse, parse_obj_as( type_=DocSearchPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( 
- GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def doc_summary( - self, - *, - documents: typing.Sequence[str], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - merge_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = OMIT, - chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT, - selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[DocSummaryPageOutput]: + def post_v3smart_gpt_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> SmartGptPageStatusResponse: """ Parameters ---------- - documents : typing.Sequence[str] - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - task_instructions : typing.Optional[str] - - merge_instructions : typing.Optional[str] 
- - selected_model : typing.Optional[DocSummaryPageRequestSelectedModel] - - chain_type : typing.Optional[typing.Literal["map_reduce"]] - - selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel] - - google_translate_target : typing.Optional[str] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[DocSummaryPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[DocSummaryPageOutput] + SmartGptPageStatusResponse Successful Response Examples @@ -1945,188 +637,39 @@ def doc_summary( client = Gooey( api_key="YOUR_API_KEY", ) - client.doc_summary( - documents=["documents"], - ) + client.post_v3smart_gpt_async() """ _response = self._client_wrapper.httpx_client.request( - "v3/doc-summary/async", + "v3/SmartGPT/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "task_instructions": task_instructions, - "merge_instructions": merge_instructions, - "selected_model": selected_model, - "chain_type": chain_type, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - DocSummaryPageStatusResponse, + return typing.cast( + SmartGptPageStatusResponse, parse_obj_as( - type_=DocSummaryPageStatusResponse, # type: ignore + 
type_=SmartGptPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def lipsync_tts( - self, - *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: 
typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT, - input_face: typing.Optional[str] = OMIT, - face_padding_top: typing.Optional[int] = OMIT, - face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, - selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[LipsyncTtsPageOutput]: + def post_v3doc_summary_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> DocSummaryPageStatusResponse: """ Parameters ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : 
typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[LipsyncTtsPageOutput] + DocSummaryPageStatusResponse Successful Response Examples @@ -2136,180 +679,39 @@ def lipsync_tts( client = Gooey( api_key="YOUR_API_KEY", ) - client.lipsync_tts( - text_prompt="text_prompt", - ) + client.post_v3doc_summary_async() """ _response = self._client_wrapper.httpx_client.request( - "v3/LipsyncTTS/async", + "v3/doc-summary/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": 
openai_voice_name, - "openai_tts_model": openai_tts_model, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "selected_model": selected_model, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - LipsyncTtsPageStatusResponse, + return typing.cast( + DocSummaryPageStatusResponse, parse_obj_as( - type_=LipsyncTtsPageStatusResponse, # type: ignore + type_=DocSummaryPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def text_to_speech( - self, - *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - 
uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[TextToSpeechPageOutput]: + def post_v3functions_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> FunctionsPageStatusResponse: """ Parameters ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - 
elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[TextToSpeechPageOutput] + FunctionsPageStatusResponse Successful Response Examples @@ -2319,145 +721,39 @@ def text_to_speech( client = Gooey( api_key="YOUR_API_KEY", ) - client.text_to_speech( - text_prompt="text_prompt", - ) + client.post_v3functions_async() """ _response = self._client_wrapper.httpx_client.request( - "v3/TextToSpeech/async", + "v3/functions/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "settings": 
settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - TextToSpeechPageStatusResponse, + return typing.cast( + FunctionsPageStatusResponse, parse_obj_as( - type_=TextToSpeechPageStatusResponse, # type: ignore + type_=FunctionsPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def speech_recognition( - self, - *, - documents: typing.Sequence[str], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT, - language: typing.Optional[str] = OMIT, - translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT, - output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - translation_source: typing.Optional[str] = OMIT, - translation_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - settings: 
typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[AsrPageOutput]: + def post_v3lipsync_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> LipsyncPageStatusResponse: """ Parameters ---------- - documents : typing.Sequence[str] - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[AsrPageRequestSelectedModel] - - language : typing.Optional[str] - - translation_model : typing.Optional[AsrPageRequestTranslationModel] - - output_format : typing.Optional[AsrPageRequestOutputFormat] - - google_translate_target : typing.Optional[str] - use `translation_model` & `translation_target` instead. - - translation_source : typing.Optional[str] - - translation_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[AsrPageOutput] + LipsyncPageStatusResponse Successful Response Examples @@ -2467,132 +763,39 @@ def speech_recognition( client = Gooey( api_key="YOUR_API_KEY", ) - client.speech_recognition( - documents=["documents"], - ) + client.post_v3lipsync_async() """ _response = self._client_wrapper.httpx_client.request( - "v3/asr/async", + "v3/Lipsync/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "selected_model": selected_model, - "language": language, - "translation_model": translation_model, - "output_format": output_format, - "google_translate_target": google_translate_target, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - AsrPageStatusResponse, + return typing.cast( + LipsyncPageStatusResponse, parse_obj_as( - type_=AsrPageStatusResponse, # type: ignore + type_=LipsyncPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, 
body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def text_to_music( - self, - *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - duration_sec: typing.Optional[float] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[Text2AudioPageOutput]: + def post_v3lipsync_tts_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> LipsyncTtsPageStatusResponse: """ Parameters ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - negative_prompt : typing.Optional[str] - - duration_sec : typing.Optional[float] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - sd2upscaling : typing.Optional[bool] - - selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[Text2AudioPageOutput] + LipsyncTtsPageStatusResponse Successful Response Examples @@ -2602,122 +805,79 @@ def text_to_music( client = Gooey( api_key="YOUR_API_KEY", ) - client.text_to_music( - text_prompt="text_prompt", - ) + client.post_v3lipsync_tts_async() """ _response = self._client_wrapper.httpx_client.request( - "v3/text2audio/async", + "v3/LipsyncTTS/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "duration_sec": duration_sec, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - Text2AudioPageStatusResponse, + return typing.cast( + LipsyncTtsPageStatusResponse, parse_obj_as( - type_=Text2AudioPageStatusResponse, # type: ignore + type_=LipsyncTtsPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise 
ApiError(status_code=_response.status_code, body=_response_json) - def translate( - self, - *, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - texts: typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT, - translation_source: typing.Optional[str] = OMIT, - translation_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[TranslationPageOutput]: + def post_v3text_to_speech_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> TextToSpeechPageStatusResponse: """ Parameters ---------- - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] + request_options : typing.Optional[RequestOptions] + Request-specific configuration. - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments + Returns + ------- + TextToSpeechPageStatusResponse + Successful Response - texts : typing.Optional[typing.Sequence[str]] - - selected_model : typing.Optional[TranslationPageRequestSelectedModel] - - translation_source : typing.Optional[str] - - translation_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). 
+ Examples + -------- + from gooey import Gooey - settings : typing.Optional[RunSettings] + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3text_to_speech_async() + """ + _response = self._client_wrapper.httpx_client.request( + "v3/TextToSpeech/async", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TextToSpeechPageStatusResponse, + parse_obj_as( + type_=TextToSpeechPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + def post_v3asr_async(self, *, request_options: typing.Optional[RequestOptions] = None) -> AsrPageStatusResponse: + """ + Parameters + ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[TranslationPageOutput] + AsrPageStatusResponse Successful Response Examples @@ -2727,141 +887,123 @@ def translate( client = Gooey( api_key="YOUR_API_KEY", ) - client.translate() + client.post_v3asr_async() """ _response = self._client_wrapper.httpx_client.request( - "v3/translate/async", + "v3/asr/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "texts": texts, - "selected_model": selected_model, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - TranslationPageStatusResponse, + return typing.cast( + AsrPageStatusResponse, parse_obj_as( - type_=TranslationPageStatusResponse, # type: ignore + type_=AsrPageStatusResponse, # type: ignore object_=_response.json(), ), ) - 
return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def remix_image( - self, - *, - input_image: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - text_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT, - selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - prompt_strength: typing.Optional[float] = OMIT, - controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, - seed: typing.Optional[int] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[Img2ImgPageOutput]: + def 
post_v3text2audio_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> Text2AudioPageStatusResponse: """ Parameters ---------- - input_image : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - text_prompt : typing.Optional[str] - - selected_model : typing.Optional[Img2ImgPageRequestSelectedModel] - - selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - output_width : typing.Optional[int] + request_options : typing.Optional[RequestOptions] + Request-specific configuration. - output_height : typing.Optional[int] + Returns + ------- + Text2AudioPageStatusResponse + Successful Response - guidance_scale : typing.Optional[float] + Examples + -------- + from gooey import Gooey - prompt_strength : typing.Optional[float] + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3text2audio_async() + """ + _response = self._client_wrapper.httpx_client.request( + "v3/text2audio/async", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + Text2AudioPageStatusResponse, + parse_obj_as( + type_=Text2AudioPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) - controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] + def post_v3translate_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> TranslationPageStatusResponse: + """ + Parameters + 
---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. - seed : typing.Optional[int] + Returns + ------- + TranslationPageStatusResponse + Successful Response - image_guidance_scale : typing.Optional[float] + Examples + -------- + from gooey import Gooey - settings : typing.Optional[RunSettings] + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3translate_async() + """ + _response = self._client_wrapper.httpx_client.request( + "v3/translate/async", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + TranslationPageStatusResponse, + parse_obj_as( + type_=TranslationPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + def post_v3img2img_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> Img2ImgPageStatusResponse: + """ + Parameters + ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[Img2ImgPageOutput] + Img2ImgPageStatusResponse Successful Response Examples @@ -2871,155 +1013,39 @@ def remix_image( client = Gooey( api_key="YOUR_API_KEY", ) - client.remix_image( - input_image="input_image", - ) + client.post_v3img2img_async() """ _response = self._client_wrapper.httpx_client.request( "v3/Img2Img/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "seed": seed, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( Img2ImgPageStatusResponse, parse_obj_as( type_=Img2ImgPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except 
JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def text_to_image( - self, - *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - dall_e3quality: typing.Optional[str] = OMIT, - dall_e3style: typing.Optional[str] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, - scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, - edit_instruction: typing.Optional[str] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[CompareText2ImgPageOutput]: + def post_v3compare_text2img_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> CompareText2ImgPageStatusResponse: """ Parameters ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - negative_prompt : typing.Optional[str] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - dall_e3quality : typing.Optional[str] - - 
dall_e3style : typing.Optional[str] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - sd2upscaling : typing.Optional[bool] - - selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] - - scheduler : typing.Optional[CompareText2ImgPageRequestScheduler] - - edit_instruction : typing.Optional[str] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[CompareText2ImgPageOutput] + CompareText2ImgPageStatusResponse Successful Response Examples @@ -3029,156 +1055,39 @@ def text_to_image( client = Gooey( api_key="YOUR_API_KEY", ) - client.text_to_image( - text_prompt="text_prompt", - ) + client.post_v3compare_text2img_async() """ _response = self._client_wrapper.httpx_client.request( "v3/CompareText2Img/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "output_width": output_width, - "output_height": output_height, - "num_outputs": num_outputs, - "quality": quality, - "dall_e_3_quality": dall_e3quality, - "dall_e_3_style": dall_e3style, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, - "scheduler": scheduler, - "edit_instruction": edit_instruction, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( CompareText2ImgPageStatusResponse, parse_obj_as( type_=CompareText2ImgPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - 
typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def product_image( - self, - *, - input_image: str, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - mask_threshold: typing.Optional[float] = OMIT, - selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[ObjectInpaintingPageOutput]: + def post_v3object_inpainting_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> ObjectInpaintingPageStatusResponse: """ Parameters ---------- - input_image : str - 
- text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - mask_threshold : typing.Optional[float] - - selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - sd2upscaling : typing.Optional[bool] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[ObjectInpaintingPageOutput] + ObjectInpaintingPageStatusResponse Successful Response Examples @@ -3188,154 +1097,39 @@ def product_image( client = Gooey( api_key="YOUR_API_KEY", ) - client.product_image( - input_image="input_image", - text_prompt="text_prompt", - ) + client.post_v3object_inpainting_async() """ _response = self._client_wrapper.httpx_client.request( "v3/ObjectInpainting/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "mask_threshold": mask_threshold, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "sd_2_upscaling": sd2upscaling, - "seed": seed, - "settings": settings, - }, 
request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( ObjectInpaintingPageStatusResponse, parse_obj_as( type_=ObjectInpaintingPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def portrait( - self, - *, - input_image: str, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - face_scale: typing.Optional[float] = OMIT, - face_pos_x: typing.Optional[float] = OMIT, - face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - upscale_factor: typing.Optional[float] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - 
settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[FaceInpaintingPageOutput]: + def post_v3face_inpainting_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> FaceInpaintingPageStatusResponse: """ Parameters ---------- - input_image : str - - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - face_scale : typing.Optional[float] - - face_pos_x : typing.Optional[float] - - face_pos_y : typing.Optional[float] - - selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - upscale_factor : typing.Optional[float] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[FaceInpaintingPageOutput] + FaceInpaintingPageStatusResponse Successful Response Examples @@ -3345,180 +1139,39 @@ def portrait( client = Gooey( api_key="YOUR_API_KEY", ) - client.portrait( - input_image="input_image", - text_prompt="tony stark from the iron man", - ) + client.post_v3face_inpainting_async() """ _response = self._client_wrapper.httpx_client.request( "v3/FaceInpainting/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "upscale_factor": upscale_factor, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "seed": seed, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( FaceInpaintingPageStatusResponse, parse_obj_as( type_=FaceInpaintingPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except 
JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def image_from_email( - self, - *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - email_address: typing.Optional[str] = OMIT, - twitter_handle: typing.Optional[str] = OMIT, - face_scale: typing.Optional[float] = OMIT, - face_pos_x: typing.Optional[float] = OMIT, - face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - upscale_factor: typing.Optional[float] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - should_send_email: typing.Optional[bool] = OMIT, - email_from: typing.Optional[str] = OMIT, - email_cc: typing.Optional[str] = OMIT, - email_bcc: typing.Optional[str] = OMIT, - email_subject: typing.Optional[str] = OMIT, - email_body: typing.Optional[str] = OMIT, - email_body_enable_html: typing.Optional[bool] = OMIT, - fallback_email_body: typing.Optional[str] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[EmailFaceInpaintingPageOutput]: + def post_v3email_face_inpainting_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> EmailFaceInpaintingPageStatusResponse: """ Parameters ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - email_address : typing.Optional[str] - - twitter_handle : typing.Optional[str] - - face_scale : typing.Optional[float] - - face_pos_x : typing.Optional[float] - - face_pos_y : typing.Optional[float] - - selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - upscale_factor : typing.Optional[float] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - should_send_email : typing.Optional[bool] - - email_from : typing.Optional[str] - - email_cc : typing.Optional[str] - - email_bcc : typing.Optional[str] - - email_subject : typing.Optional[str] - - email_body : typing.Optional[str] - - email_body_enable_html : typing.Optional[bool] - - fallback_email_body : typing.Optional[str] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[EmailFaceInpaintingPageOutput] + EmailFaceInpaintingPageStatusResponse Successful Response Examples @@ -3528,160 +1181,123 @@ def image_from_email( client = Gooey( api_key="YOUR_API_KEY", ) - client.image_from_email( - email_address="sean@dara.network", - text_prompt="winter's day in paris", - ) + client.post_v3email_face_inpainting_async() """ _response = self._client_wrapper.httpx_client.request( "v3/EmailFaceInpainting/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "email_address": email_address, - "twitter_handle": twitter_handle, - "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "upscale_factor": upscale_factor, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "should_send_email": should_send_email, - "email_from": email_from, - "email_cc": email_cc, - "email_bcc": email_bcc, - "email_subject": email_subject, - "email_body": email_body, - "email_body_enable_html": email_body_enable_html, - "fallback_email_body": fallback_email_body, - "seed": seed, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( EmailFaceInpaintingPageStatusResponse, parse_obj_as( type_=EmailFaceInpaintingPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - 
HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def image_from_web_search( - self, - *, - search_query: str, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - prompt_strength: typing.Optional[float] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[GoogleImageGenPageOutput]: + def post_v3google_image_gen_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> GoogleImageGenPageStatusResponse: """ Parameters ---------- - search_query : str - - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates 
and in functions as arguments - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] + request_options : typing.Optional[RequestOptions] + Request-specific configuration. - quality : typing.Optional[int] + Returns + ------- + GoogleImageGenPageStatusResponse + Successful Response - guidance_scale : typing.Optional[float] + Examples + -------- + from gooey import Gooey - prompt_strength : typing.Optional[float] + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3google_image_gen_async() + """ + _response = self._client_wrapper.httpx_client.request( + "v3/GoogleImageGen/async", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + GoogleImageGenPageStatusResponse, + parse_obj_as( + type_=GoogleImageGenPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) - sd2upscaling : typing.Optional[bool] + def post_v3image_segmentation_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> ImageSegmentationPageStatusResponse: + """ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
- seed : typing.Optional[int] + Returns + ------- + ImageSegmentationPageStatusResponse + Successful Response - image_guidance_scale : typing.Optional[float] + Examples + -------- + from gooey import Gooey - settings : typing.Optional[RunSettings] + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.post_v3image_segmentation_async() + """ + _response = self._client_wrapper.httpx_client.request( + "v3/ImageSegmentation/async", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + ImageSegmentationPageStatusResponse, + parse_obj_as( + type_=ImageSegmentationPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + def post_v3compare_ai_upscalers_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> CompareUpscalerPageStatusResponse: + """ + Parameters + ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[GoogleImageGenPageOutput] + CompareUpscalerPageStatusResponse Successful Response Examples @@ -3691,134 +1307,39 @@ def image_from_web_search( client = Gooey( api_key="YOUR_API_KEY", ) - client.image_from_web_search( - search_query="search_query", - text_prompt="text_prompt", - ) + client.post_v3compare_ai_upscalers_async() """ _response = self._client_wrapper.httpx_client.request( - "v3/GoogleImageGen/async", + "v3/compare-ai-upscalers/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "search_query": search_query, - "text_prompt": text_prompt, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "sd_2_upscaling": sd2upscaling, - "seed": seed, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - GoogleImageGenPageStatusResponse, + return typing.cast( + CompareUpscalerPageStatusResponse, parse_obj_as( - type_=GoogleImageGenPageStatusResponse, # type: ignore + type_=CompareUpscalerPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - 
typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def remove_background( - self, - *, - input_image: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT, - mask_threshold: typing.Optional[float] = OMIT, - rect_persepective_transform: typing.Optional[bool] = OMIT, - reflection_opacity: typing.Optional[float] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[ImageSegmentationPageOutput]: + def post_v3chyron_plant_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> ChyronPlantPageStatusResponse: """ Parameters ---------- - input_image : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel] - - mask_threshold : typing.Optional[float] - - rect_persepective_transform : typing.Optional[bool] - - reflection_opacity : typing.Optional[float] - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - settings : typing.Optional[RunSettings] - request_options : 
typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[ImageSegmentationPageOutput] + ChyronPlantPageStatusResponse Successful Response Examples @@ -3828,122 +1349,39 @@ def remove_background( client = Gooey( api_key="YOUR_API_KEY", ) - client.remove_background( - input_image="input_image", - ) + client.post_v3chyron_plant_async() """ _response = self._client_wrapper.httpx_client.request( - "v3/ImageSegmentation/async", + "v3/ChyronPlant/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "selected_model": selected_model, - "mask_threshold": mask_threshold, - "rect_persepective_transform": rect_persepective_transform, - "reflection_opacity": reflection_opacity, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - ImageSegmentationPageStatusResponse, + return typing.cast( + ChyronPlantPageStatusResponse, parse_obj_as( - type_=ImageSegmentationPageStatusResponse, # type: ignore + type_=ChyronPlantPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = 
_response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def upscale( - self, - *, - scale: int, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - input_image: typing.Optional[str] = OMIT, - input_video: typing.Optional[str] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT, - selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[CompareUpscalerPageOutput]: + def post_v3letter_writer_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> LetterWriterPageStatusResponse: """ Parameters ---------- - scale : int - The final upsampling scale of the image - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_image : typing.Optional[str] - Input Image - - input_video : typing.Optional[str] - Input Video - - selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] - - selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[CompareUpscalerPageOutput] + LetterWriterPageStatusResponse Successful Response Examples @@ -3953,107 +1391,39 @@ def upscale( client = Gooey( api_key="YOUR_API_KEY", ) - client.upscale( - scale=1, - ) + client.post_v3letter_writer_async() """ _response = self._client_wrapper.httpx_client.request( - "v3/compare-ai-upscalers/async", + "v3/LetterWriter/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "input_video": input_video, - "scale": scale, - "selected_models": selected_models, - "selected_bg_model": selected_bg_model, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - CompareUpscalerPageStatusResponse, + return typing.cast( + LetterWriterPageStatusResponse, parse_obj_as( - type_=CompareUpscalerPageStatusResponse, # type: ignore + type_=LetterWriterPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def embed( - self, - *, - texts: 
typing.Sequence[str], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[EmbeddingsPageOutput]: + def post_v3embeddings_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> EmbeddingsPageStatusResponse: """ Parameters ---------- - texts : typing.Sequence[str] - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[EmbeddingsPageOutput] + EmbeddingsPageStatusResponse Successful Response Examples @@ -4063,173 +1433,39 @@ def embed( client = Gooey( api_key="YOUR_API_KEY", ) - client.embed( - texts=["texts"], - ) + client.post_v3embeddings_async() """ _response = self._client_wrapper.httpx_client.request( "v3/embeddings/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "texts": texts, - "selected_model": selected_model, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( EmbeddingsPageStatusResponse, parse_obj_as( type_=EmbeddingsPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def seo_people_also_ask_doc( - self, - *, - search_query: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - keyword_query: 
typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT, - citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[RelatedQnADocPageOutput]: + def post_v3related_qna_maker_doc_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> RelatedQnADocPageStatusResponse: """ Parameters ---------- - search_query : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery] - - documents : 
typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel] - - citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[RelatedQnADocPageOutput] + RelatedQnADocPageStatusResponse Successful Response Examples @@ -4239,87 +1475,22 @@ def seo_people_also_ask_doc( client = Gooey( api_key="YOUR_API_KEY", ) - client.seo_people_also_ask_doc( - search_query="search_query", - ) + client.post_v3related_qna_maker_doc_async() """ _response = self._client_wrapper.httpx_client.request( "v3/related-qna-maker-doc/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "citation_style": citation_style, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( RelatedQnADocPageStatusResponse, parse_obj_as( type_=RelatedQnADocPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if 
_response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) @@ -4429,83 +1600,77 @@ def __init__( timeout=_defaulted_timeout, ) self.copilot_integrations = AsyncCopilotIntegrationsClient(client_wrapper=self._client_wrapper) - self.copilot_for_your_enterprise = AsyncCopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper) - self.evaluator = AsyncEvaluatorClient(client_wrapper=self._client_wrapper) - self.smart_gpt = AsyncSmartGptClient(client_wrapper=self._client_wrapper) - self.functions = AsyncFunctionsClient(client_wrapper=self._client_wrapper) - self.lip_syncing = AsyncLipSyncingClient(client_wrapper=self._client_wrapper) self.misc = AsyncMiscClient(client_wrapper=self._client_wrapper) - async def animate( - self, - *, - animation_prompts: typing.Sequence[AnimationPrompt], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - max_frames: typing.Optional[int] = OMIT, - selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT, - animation_mode: typing.Optional[str] = OMIT, - zoom: typing.Optional[str] = OMIT, - translation_x: typing.Optional[str] = OMIT, - translation_y: typing.Optional[str] = OMIT, - rotation3d_x: typing.Optional[str] = OMIT, - rotation3d_y: typing.Optional[str] = OMIT, - rotation3d_z: typing.Optional[str] = OMIT, - fps: typing.Optional[int] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: 
typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[DeforumSdPageOutput]: + async def post_v3video_bots_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> VideoBotsPageStatusResponse: """ Parameters ---------- - animation_prompts : typing.Sequence[AnimationPrompt] - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - max_frames : typing.Optional[int] - - selected_model : typing.Optional[DeforumSdPageRequestSelectedModel] - - animation_mode : typing.Optional[str] - - zoom : typing.Optional[str] + request_options : typing.Optional[RequestOptions] + Request-specific configuration. - translation_x : typing.Optional[str] + Returns + ------- + VideoBotsPageStatusResponse + Successful Response - translation_y : typing.Optional[str] + Examples + -------- + import asyncio - rotation3d_x : typing.Optional[str] + from gooey import AsyncGooey - rotation3d_y : typing.Optional[str] + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) - rotation3d_z : typing.Optional[str] - fps : typing.Optional[int] + async def main() -> None: + await client.post_v3video_bots_async() - seed : typing.Optional[int] - settings : typing.Optional[RunSettings] + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/video-bots/async", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + VideoBotsPageStatusResponse, + parse_obj_as( + type_=VideoBotsPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise 
ApiError(status_code=_response.status_code, body=_response_json) + async def post_v3deforum_sd_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> DeforumSdPageStatusResponse: + """ + Parameters + ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[DeforumSdPageOutput] + DeforumSdPageStatusResponse Successful Response Examples -------- import asyncio - from gooey import AnimationPrompt, AsyncGooey + from gooey import AsyncGooey client = AsyncGooey( api_key="YOUR_API_KEY", @@ -4513,14 +1678,7 @@ async def animate( async def main() -> None: - await client.animate( - animation_prompts=[ - AnimationPrompt( - frame="frame", - prompt="prompt", - ) - ], - ) + await client.post_v3deforum_sd_async() asyncio.run(main()) @@ -4528,183 +1686,184 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/DeforumSD/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "animation_prompts": animation_prompts, - "max_frames": max_frames, - "selected_model": selected_model, - "animation_mode": animation_mode, - "zoom": zoom, - "translation_x": translation_x, - "translation_y": translation_y, - "rotation_3d_x": rotation3d_x, - "rotation_3d_y": rotation3d_y, - "rotation_3d_z": rotation3d_z, - "fps": fps, - "seed": seed, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( DeforumSdPageStatusResponse, parse_obj_as( type_=DeforumSdPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if 
_response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def qr_code( - self, - *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - qr_code_data: typing.Optional[str] = OMIT, - qr_code_input_image: typing.Optional[str] = OMIT, - qr_code_vcard: typing.Optional[Vcard] = OMIT, - qr_code_file: typing.Optional[str] = OMIT, - use_url_shortener: typing.Optional[bool] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - image_prompt: typing.Optional[str] = OMIT, - image_prompt_controlnet_models: typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] - ] = OMIT, - image_prompt_strength: typing.Optional[float] = OMIT, - image_prompt_scale: typing.Optional[float] = OMIT, - image_prompt_pos_x: typing.Optional[float] = OMIT, - image_prompt_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT, - selected_controlnet_model: typing.Optional[ - typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem] - ] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, - num_outputs: 
typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = OMIT, - seed: typing.Optional[int] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[QrCodeGeneratorPageOutput]: + async def post_v3art_qr_code_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> QrCodeGeneratorPageStatusResponse: """ Parameters ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
- qr_code_data : typing.Optional[str] + Returns + ------- + QrCodeGeneratorPageStatusResponse + Successful Response - qr_code_input_image : typing.Optional[str] + Examples + -------- + import asyncio - qr_code_vcard : typing.Optional[Vcard] + from gooey import AsyncGooey - qr_code_file : typing.Optional[str] + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) - use_url_shortener : typing.Optional[bool] - negative_prompt : typing.Optional[str] + async def main() -> None: + await client.post_v3art_qr_code_async() - image_prompt : typing.Optional[str] - image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]] + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/art-qr-code/async", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + QrCodeGeneratorPageStatusResponse, + parse_obj_as( + type_=QrCodeGeneratorPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) - image_prompt_strength : typing.Optional[float] + async def post_v3related_qna_maker_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> RelatedQnAPageStatusResponse: + """ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
- image_prompt_scale : typing.Optional[float] + Returns + ------- + RelatedQnAPageStatusResponse + Successful Response - image_prompt_pos_x : typing.Optional[float] + Examples + -------- + import asyncio - image_prompt_pos_y : typing.Optional[float] + from gooey import AsyncGooey - selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel] + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) - selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]] - output_width : typing.Optional[int] + async def main() -> None: + await client.post_v3related_qna_maker_async() - output_height : typing.Optional[int] - guidance_scale : typing.Optional[float] + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/related-qna-maker/async", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + RelatedQnAPageStatusResponse, + parse_obj_as( + type_=RelatedQnAPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) - controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]] + async def post_v3seo_summary_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> SeoSummaryPageStatusResponse: + """ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
- num_outputs : typing.Optional[int] + Returns + ------- + SeoSummaryPageStatusResponse + Successful Response - quality : typing.Optional[int] + Examples + -------- + import asyncio - scheduler : typing.Optional[QrCodeGeneratorPageRequestScheduler] + from gooey import AsyncGooey - seed : typing.Optional[int] + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) - obj_scale : typing.Optional[float] - obj_pos_x : typing.Optional[float] + async def main() -> None: + await client.post_v3seo_summary_async() - obj_pos_y : typing.Optional[float] - settings : typing.Optional[RunSettings] + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/SEOSummary/async", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SeoSummaryPageStatusResponse, + parse_obj_as( + type_=SeoSummaryPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + async def post_v3google_gpt_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> GoogleGptPageStatusResponse: + """ + Parameters + ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[QrCodeGeneratorPageOutput] + GoogleGptPageStatusResponse Successful Response Examples @@ -4719,194 +1878,42 @@ async def qr_code( async def main() -> None: - await client.qr_code( - text_prompt="text_prompt", - ) + await client.post_v3google_gpt_async() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/art-qr-code/async", + "v3/google-gpt/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "qr_code_data": qr_code_data, - "qr_code_input_image": qr_code_input_image, - "qr_code_vcard": qr_code_vcard, - "qr_code_file": qr_code_file, - "use_url_shortener": use_url_shortener, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "image_prompt": image_prompt, - "image_prompt_controlnet_models": image_prompt_controlnet_models, - "image_prompt_strength": image_prompt_strength, - "image_prompt_scale": image_prompt_scale, - "image_prompt_pos_x": image_prompt_pos_x, - "image_prompt_pos_y": image_prompt_pos_y, - "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "num_outputs": num_outputs, - "quality": quality, - "scheduler": scheduler, - "seed": seed, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - QrCodeGeneratorPageStatusResponse, + return typing.cast( + GoogleGptPageStatusResponse, parse_obj_as( - type_=QrCodeGeneratorPageStatusResponse, # type: ignore + type_=GoogleGptPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise 
PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def seo_people_also_ask( - self, - *, - search_query: str, - site_filter: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: 
typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[RelatedQnAPageOutput]: + async def post_v3social_lookup_email_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> SocialLookupEmailPageStatusResponse: """ Parameters ---------- - search_query : str - - site_filter : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
- - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[RelatedQnAPageOutput] + SocialLookupEmailPageStatusResponse Successful Response Examples @@ -4921,176 +1928,42 @@ async def seo_people_also_ask( async def main() -> None: - await client.seo_people_also_ask( - search_query="search_query", - site_filter="site_filter", - ) + await client.post_v3social_lookup_email_async() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/related-qna-maker/async", + "v3/SocialLookupEmail/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - 
"serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - RelatedQnAPageStatusResponse, + return typing.cast( + SocialLookupEmailPageStatusResponse, parse_obj_as( - type_=RelatedQnAPageStatusResponse, # type: ignore + type_=SocialLookupEmailPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def seo_content( - self, - *, - search_query: str, - keywords: str, - title: str, - company_url: str, - example_id: typing.Optional[str] = None, - task_instructions: typing.Optional[str] = OMIT, - enable_html: typing.Optional[bool] = OMIT, - selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - enable_crosslinks: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: 
typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[SeoSummaryPageOutput]: + async def post_v3bulk_runner_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> BulkRunnerPageStatusResponse: """ Parameters ---------- - search_query : str - - keywords : str - - title : str - - company_url : str - - example_id : typing.Optional[str] - - task_instructions : typing.Optional[str] - - enable_html : typing.Optional[bool] - - selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel] - - max_search_urls : typing.Optional[int] - - enable_crosslinks : typing.Optional[bool] - - seed : typing.Optional[int] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[SeoSummaryPageOutput] + BulkRunnerPageStatusResponse Successful Response Examples @@ -5105,189 +1978,42 @@ async def seo_content( async def main() -> None: - await client.seo_content( - search_query="search_query", - keywords="keywords", - title="title", - company_url="company_url", - ) + await client.post_v3bulk_runner_async() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/SEOSummary/async", + "v3/bulk-runner/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "search_query": search_query, - "keywords": keywords, - "title": title, - "company_url": company_url, - "task_instructions": task_instructions, - "enable_html": enable_html, - "selected_model": selected_model, - "max_search_urls": max_search_urls, - "enable_crosslinks": enable_crosslinks, - "seed": seed, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - SeoSummaryPageStatusResponse, + return typing.cast( + BulkRunnerPageStatusResponse, parse_obj_as( - type_=SeoSummaryPageStatusResponse, # type: ignore + type_=BulkRunnerPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - 
typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def web_search_llm( - self, - *, - search_query: str, - site_filter: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - 
) -> typing.Optional[GoogleGptPageOutput]: + async def post_v3bulk_eval_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> BulkEvalPageStatusResponse: """ Parameters ---------- - search_query : str - - site_filter : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[GoogleGptPageRequestSelectedModel] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[GoogleGptPageOutput] + BulkEvalPageStatusResponse Successful Response Examples @@ -5302,148 +2028,42 @@ async def web_search_llm( async def main() -> None: - await client.web_search_llm( - search_query="search_query", - site_filter="site_filter", - ) + await client.post_v3bulk_eval_async() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/google-gpt/async", + "v3/bulk-eval/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - GoogleGptPageStatusResponse, + return typing.cast( + BulkEvalPageStatusResponse, parse_obj_as( - type_=GoogleGptPageStatusResponse, # type: ignore + type_=BulkEvalPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - 
object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def personalize_email( - self, - *, - email_address: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[SocialLookupEmailPageOutput]: + async def post_v3doc_extract_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> DocExtractPageStatusResponse: """ Parameters ---------- - email_address : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - 
input_prompt : typing.Optional[str] - - selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[SocialLookupEmailPageOutput] + DocExtractPageStatusResponse Successful Response Examples @@ -5458,141 +2078,42 @@ async def personalize_email( async def main() -> None: - await client.personalize_email( - email_address="email_address", - ) + await client.post_v3doc_extract_async() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/SocialLookupEmail/async", + "v3/doc-extract/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "email_address": email_address, - "input_prompt": input_prompt, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - SocialLookupEmailPageStatusResponse, + return typing.cast( + DocExtractPageStatusResponse, parse_obj_as( - type_=SocialLookupEmailPageStatusResponse, # type: ignore + type_=DocExtractPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - 
parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def bulk_run( - self, - *, - documents: typing.Sequence[str], - run_urls: typing.Sequence[str], - input_columns: typing.Dict[str, str], - output_columns: typing.Dict[str, str], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - eval_urls: typing.Optional[typing.Sequence[str]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[BulkRunnerPageOutput]: + async def post_v3compare_llm_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> CompareLlmPageStatusResponse: """ Parameters ---------- - documents : typing.Sequence[str] - - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. - - - run_urls : typing.Sequence[str] - - Provide one or more Gooey.AI workflow runs. - You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. 
- - - input_columns : typing.Dict[str, str] - - For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. - - - output_columns : typing.Dict[str, str] - - For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. - - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - eval_urls : typing.Optional[typing.Sequence[str]] - - _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. - - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[BulkRunnerPageOutput] + CompareLlmPageStatusResponse Successful Response Examples @@ -5607,148 +2128,42 @@ async def bulk_run( async def main() -> None: - await client.bulk_run( - documents=["documents"], - run_urls=["run_urls"], - input_columns={"key": "value"}, - output_columns={"key": "value"}, - ) + await client.post_v3compare_llm_async() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-runner/async", + "v3/CompareLLM/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "run_urls": run_urls, - "input_columns": input_columns, - "output_columns": output_columns, - "eval_urls": eval_urls, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - BulkRunnerPageStatusResponse, + return typing.cast( + CompareLlmPageStatusResponse, parse_obj_as( - type_=BulkRunnerPageStatusResponse, # type: ignore + 
type_=CompareLlmPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def synthesize_data( - self, - *, - documents: typing.Sequence[str], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - sheet_url: typing.Optional[str] = OMIT, - selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: 
typing.Optional[RequestOptions] = None, - ) -> typing.Optional[DocExtractPageOutput]: + async def post_v3doc_search_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> DocSearchPageStatusResponse: """ Parameters ---------- - documents : typing.Sequence[str] - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - sheet_url : typing.Optional[str] - - selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel] - - google_translate_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - task_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocExtractPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[DocExtractPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[DocExtractPageOutput] + DocSearchPageStatusResponse Successful Response Examples @@ -5763,136 +2178,92 @@ async def synthesize_data( async def main() -> None: - await client.synthesize_data( - documents=["documents"], - ) + await client.post_v3doc_search_async() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/doc-extract/async", + "v3/doc-search/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "sheet_url": sheet_url, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "glossary_document": glossary_document, - "task_instructions": task_instructions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - DocExtractPageStatusResponse, + return typing.cast( + DocSearchPageStatusResponse, parse_obj_as( - type_=DocExtractPageStatusResponse, # type: ignore + type_=DocSearchPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - 
parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def llm( - self, - *, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[CompareLlmPageOutput]: + async def post_v3smart_gpt_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> SmartGptPageStatusResponse: """ Parameters ---------- - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
- selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] + Returns + ------- + SmartGptPageStatusResponse + Successful Response - avoid_repetition : typing.Optional[bool] + Examples + -------- + import asyncio - num_outputs : typing.Optional[int] + from gooey import AsyncGooey - quality : typing.Optional[float] + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) - max_tokens : typing.Optional[int] - sampling_temperature : typing.Optional[float] + async def main() -> None: + await client.post_v3smart_gpt_async() - response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] - settings : typing.Optional[RunSettings] + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/SmartGPT/async", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SmartGptPageStatusResponse, + parse_obj_as( + type_=SmartGptPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + async def post_v3doc_summary_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> DocSummaryPageStatusResponse: + """ + Parameters + ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[CompareLlmPageOutput] + DocSummaryPageStatusResponse Successful Response Examples @@ -5907,166 +2278,42 @@ async def llm( async def main() -> None: - await client.llm() + await client.post_v3doc_summary_async() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/CompareLLM/async", + "v3/doc-summary/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "selected_models": selected_models, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - CompareLlmPageStatusResponse, + return typing.cast( + DocSummaryPageStatusResponse, parse_obj_as( - type_=CompareLlmPageStatusResponse, # type: ignore + type_=DocSummaryPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise 
ApiError(status_code=_response.status_code, body=_response_json) - async def rag( - self, - *, - search_query: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT, - citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[DocSearchPageOutput]: + async def post_v3functions_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> FunctionsPageStatusResponse: """ Parameters ---------- - search_query : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - keyword_query : 
typing.Optional[DocSearchPageRequestKeywordQuery] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocSearchPageRequestSelectedModel] - - citation_style : typing.Optional[DocSearchPageRequestCitationStyle] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[DocSearchPageOutput] + FunctionsPageStatusResponse Successful Response Examples @@ -6081,157 +2328,42 @@ async def rag( async def main() -> None: - await client.rag( - search_query="search_query", - ) + await client.post_v3functions_async() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/doc-search/async", + "v3/functions/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "citation_style": citation_style, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - DocSearchPageStatusResponse, + return typing.cast( + FunctionsPageStatusResponse, parse_obj_as( - type_=DocSearchPageStatusResponse, # type: ignore + type_=FunctionsPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - 
type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def doc_summary( - self, - *, - documents: typing.Sequence[str], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - merge_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = OMIT, - chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT, - selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[DocSummaryPageOutput]: + async def post_v3lipsync_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> LipsyncPageStatusResponse: """ Parameters ---------- - documents : typing.Sequence[str] - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - task_instructions : typing.Optional[str] - - merge_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocSummaryPageRequestSelectedModel] - - chain_type : typing.Optional[typing.Literal["map_reduce"]] - - selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel] - - google_translate_target : typing.Optional[str] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[DocSummaryPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[DocSummaryPageOutput] + LipsyncPageStatusResponse Successful Response Examples @@ -6246,191 +2378,42 @@ async def doc_summary( async def main() -> None: - await client.doc_summary( - documents=["documents"], - ) + await client.post_v3lipsync_async() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/doc-summary/async", + "v3/Lipsync/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "task_instructions": task_instructions, - "merge_instructions": merge_instructions, - "selected_model": selected_model, - "chain_type": chain_type, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 
200 <= _response.status_code < 300: - _parsed_response = typing.cast( - DocSummaryPageStatusResponse, + return typing.cast( + LipsyncPageStatusResponse, parse_obj_as( - type_=DocSummaryPageStatusResponse, # type: ignore + type_=LipsyncPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def lipsync_tts( - self, - *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: 
typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT, - input_face: typing.Optional[str] = OMIT, - face_padding_top: typing.Optional[int] = OMIT, - face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, - selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[LipsyncTtsPageOutput]: + async def post_v3lipsync_tts_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> LipsyncTtsPageStatusResponse: """ Parameters ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - 
elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[LipsyncTtsPageOutput] + LipsyncTtsPageStatusResponse Successful Response Examples @@ -6445,9 +2428,7 @@ async def lipsync_tts( async def main() -> None: - await client.lipsync_tts( - text_prompt="text_prompt", - ) + await client.post_v3lipsync_tts_async() asyncio.run(main()) @@ -6455,173 +2436,34 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/LipsyncTTS/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - 
"elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "selected_model": selected_model, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( LipsyncTtsPageStatusResponse, parse_obj_as( type_=LipsyncTtsPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def text_to_speech( - self, - *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] = OMIT, - tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[TextToSpeechPageOutput]: + async def post_v3text_to_speech_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> TextToSpeechPageStatusResponse: """ Parameters ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - 
bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[TextToSpeechPageOutput] + TextToSpeechPageStatusResponse Successful Response Examples @@ -6636,9 +2478,7 @@ async def text_to_speech( async def main() -> None: - await client.text_to_speech( - text_prompt="text_prompt", - ) + await client.post_v3text_to_speech_async() asyncio.run(main()) @@ -6646,138 +2486,34 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/TextToSpeech/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, 
- "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( TextToSpeechPageStatusResponse, parse_obj_as( type_=TextToSpeechPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def speech_recognition( - self, - *, - documents: typing.Sequence[str], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT, - language: typing.Optional[str] = OMIT, - translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT, - output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - 
translation_source: typing.Optional[str] = OMIT, - translation_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[AsrPageOutput]: + async def post_v3asr_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> AsrPageStatusResponse: """ Parameters ---------- - documents : typing.Sequence[str] - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[AsrPageRequestSelectedModel] - - language : typing.Optional[str] - - translation_model : typing.Optional[AsrPageRequestTranslationModel] - - output_format : typing.Optional[AsrPageRequestOutputFormat] - - google_translate_target : typing.Optional[str] - use `translation_model` & `translation_target` instead. - - translation_source : typing.Optional[str] - - translation_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[AsrPageOutput] + AsrPageStatusResponse Successful Response Examples @@ -6792,9 +2528,7 @@ async def speech_recognition( async def main() -> None: - await client.speech_recognition( - documents=["documents"], - ) + await client.post_v3asr_async() asyncio.run(main()) @@ -6802,125 +2536,34 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/asr/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "selected_model": selected_model, - "language": language, - "translation_model": translation_model, - "output_format": output_format, - "google_translate_target": google_translate_target, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( AsrPageStatusResponse, parse_obj_as( type_=AsrPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise 
ApiError(status_code=_response.status_code, body=_response_json) - async def text_to_music( - self, - *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - duration_sec: typing.Optional[float] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[Text2AudioPageOutput]: + async def post_v3text2audio_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> Text2AudioPageStatusResponse: """ Parameters ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - negative_prompt : typing.Optional[str] - - duration_sec : typing.Optional[float] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - sd2upscaling : typing.Optional[bool] - - selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[Text2AudioPageOutput] + Text2AudioPageStatusResponse Successful Response Examples @@ -6935,9 +2578,7 @@ async def text_to_music( async def main() -> None: - await client.text_to_music( - text_prompt="text_prompt", - ) + await client.post_v3text2audio_async() asyncio.run(main()) @@ -6945,115 +2586,34 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/text2audio/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "duration_sec": duration_sec, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( Text2AudioPageStatusResponse, parse_obj_as( type_=Text2AudioPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, 
body=_response_json) - async def translate( - self, - *, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - texts: typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT, - translation_source: typing.Optional[str] = OMIT, - translation_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[TranslationPageOutput]: + async def post_v3translate_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> TranslationPageStatusResponse: """ Parameters ---------- - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - texts : typing.Optional[typing.Sequence[str]] - - selected_model : typing.Optional[TranslationPageRequestSelectedModel] - - translation_source : typing.Optional[str] - - translation_target : typing.Optional[str] - - glossary_document : typing.Optional[str] - Provide a glossary to customize translation and improve accuracy of domain-specific terms. - If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing). - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[TranslationPageOutput] + TranslationPageStatusResponse Successful Response Examples @@ -7068,7 +2628,7 @@ async def translate( async def main() -> None: - await client.translate() + await client.post_v3translate_async() asyncio.run(main()) @@ -7076,136 +2636,34 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/translate/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "texts": texts, - "selected_model": selected_model, - "translation_source": translation_source, - "translation_target": translation_target, - "glossary_document": glossary_document, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( TranslationPageStatusResponse, parse_obj_as( type_=TranslationPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def remix_image( - self, - *, - input_image: str, - example_id: typing.Optional[str] = None, - functions: 
typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - text_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT, - selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - prompt_strength: typing.Optional[float] = OMIT, - controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT, - seed: typing.Optional[int] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[Img2ImgPageOutput]: + async def post_v3img2img_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> Img2ImgPageStatusResponse: """ Parameters ---------- - input_image : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - text_prompt : typing.Optional[str] - - selected_model : typing.Optional[Img2ImgPageRequestSelectedModel] - - selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - prompt_strength : typing.Optional[float] - - controlnet_conditioning_scale : 
typing.Optional[typing.Sequence[float]] - - seed : typing.Optional[int] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[Img2ImgPageOutput] + Img2ImgPageStatusResponse Successful Response Examples @@ -7220,9 +2678,7 @@ async def remix_image( async def main() -> None: - await client.remix_image( - input_image="input_image", - ) + await client.post_v3img2img_async() asyncio.run(main()) @@ -7230,148 +2686,34 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/Img2Img/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "seed": seed, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( Img2ImgPageStatusResponse, parse_obj_as( type_=Img2ImgPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - 
type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def text_to_image( - self, - *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - dall_e3quality: typing.Optional[str] = OMIT, - dall_e3style: typing.Optional[str] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, - scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, - edit_instruction: typing.Optional[str] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[CompareText2ImgPageOutput]: - """ - Parameters - ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - negative_prompt : typing.Optional[str] - - 
output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - dall_e3quality : typing.Optional[str] - - dall_e3style : typing.Optional[str] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - sd2upscaling : typing.Optional[bool] - - selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] - - scheduler : typing.Optional[CompareText2ImgPageRequestScheduler] - - edit_instruction : typing.Optional[str] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - + async def post_v3compare_text2img_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> CompareText2ImgPageStatusResponse: + """ + Parameters + ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[CompareText2ImgPageOutput] + CompareText2ImgPageStatusResponse Successful Response Examples @@ -7386,9 +2728,7 @@ async def text_to_image( async def main() -> None: - await client.text_to_image( - text_prompt="text_prompt", - ) + await client.post_v3compare_text2img_async() asyncio.run(main()) @@ -7396,149 +2736,34 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/CompareText2Img/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "output_width": output_width, - "output_height": output_height, - "num_outputs": num_outputs, - "quality": quality, - "dall_e_3_quality": dall_e3quality, - "dall_e_3_style": dall_e3style, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, - "scheduler": scheduler, - "edit_instruction": edit_instruction, - "image_guidance_scale": 
image_guidance_scale, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( CompareText2ImgPageStatusResponse, parse_obj_as( type_=CompareText2ImgPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def product_image( - self, - *, - input_image: str, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - mask_threshold: typing.Optional[float] = OMIT, - selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: 
typing.Optional[float] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[ObjectInpaintingPageOutput]: + async def post_v3object_inpainting_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> ObjectInpaintingPageStatusResponse: """ Parameters ---------- - input_image : str - - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - mask_threshold : typing.Optional[float] - - selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - sd2upscaling : typing.Optional[bool] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[ObjectInpaintingPageOutput] + ObjectInpaintingPageStatusResponse Successful Response Examples @@ -7553,10 +2778,7 @@ async def product_image( async def main() -> None: - await client.product_image( - input_image="input_image", - text_prompt="text_prompt", - ) + await client.post_v3object_inpainting_async() asyncio.run(main()) @@ -7564,146 +2786,34 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/ObjectInpainting/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "mask_threshold": mask_threshold, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "sd_2_upscaling": sd2upscaling, - "seed": seed, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( ObjectInpaintingPageStatusResponse, parse_obj_as( type_=ObjectInpaintingPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - 
type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def portrait( - self, - *, - input_image: str, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - face_scale: typing.Optional[float] = OMIT, - face_pos_x: typing.Optional[float] = OMIT, - face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - upscale_factor: typing.Optional[float] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[FaceInpaintingPageOutput]: + async def post_v3face_inpainting_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> FaceInpaintingPageStatusResponse: """ Parameters ---------- - input_image : str - - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - face_scale : typing.Optional[float] - - face_pos_x : typing.Optional[float] - - face_pos_y : typing.Optional[float] - - selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel] - - negative_prompt : 
typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - upscale_factor : typing.Optional[float] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[FaceInpaintingPageOutput] + FaceInpaintingPageStatusResponse Successful Response Examples @@ -7718,10 +2828,7 @@ async def portrait( async def main() -> None: - await client.portrait( - input_image="input_image", - text_prompt="tony stark from the iron man", - ) + await client.post_v3face_inpainting_async() asyncio.run(main()) @@ -7729,172 +2836,34 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/FaceInpainting/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "upscale_factor": upscale_factor, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "seed": seed, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( FaceInpaintingPageStatusResponse, parse_obj_as( type_=FaceInpaintingPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - 
object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def image_from_email( - self, - *, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - email_address: typing.Optional[str] = OMIT, - twitter_handle: typing.Optional[str] = OMIT, - face_scale: typing.Optional[float] = OMIT, - face_pos_x: typing.Optional[float] = OMIT, - face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - upscale_factor: typing.Optional[float] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - should_send_email: typing.Optional[bool] = OMIT, - email_from: typing.Optional[str] = OMIT, - email_cc: typing.Optional[str] = OMIT, - email_bcc: typing.Optional[str] = OMIT, - email_subject: typing.Optional[str] = OMIT, - email_body: typing.Optional[str] = OMIT, - email_body_enable_html: typing.Optional[bool] = OMIT, - fallback_email_body: typing.Optional[str] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: 
typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[EmailFaceInpaintingPageOutput]: + async def post_v3email_face_inpainting_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> EmailFaceInpaintingPageStatusResponse: """ Parameters ---------- - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - email_address : typing.Optional[str] - - twitter_handle : typing.Optional[str] - - face_scale : typing.Optional[float] - - face_pos_x : typing.Optional[float] - - face_pos_y : typing.Optional[float] - - selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - upscale_factor : typing.Optional[float] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - should_send_email : typing.Optional[bool] - - email_from : typing.Optional[str] - - email_cc : typing.Optional[str] - - email_bcc : typing.Optional[str] - - email_subject : typing.Optional[str] - - email_body : typing.Optional[str] - - email_body_enable_html : typing.Optional[bool] - - fallback_email_body : typing.Optional[str] - - seed : typing.Optional[int] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[EmailFaceInpaintingPageOutput] + EmailFaceInpaintingPageStatusResponse Successful Response Examples @@ -7909,10 +2878,7 @@ async def image_from_email( async def main() -> None: - await client.image_from_email( - email_address="sean@dara.network", - text_prompt="winter's day in paris", - ) + await client.post_v3email_face_inpainting_async() asyncio.run(main()) @@ -7920,152 +2886,34 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/EmailFaceInpainting/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "email_address": email_address, - "twitter_handle": twitter_handle, - "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "upscale_factor": upscale_factor, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "should_send_email": should_send_email, - "email_from": email_from, - "email_cc": email_cc, - "email_bcc": email_bcc, - "email_subject": email_subject, - "email_body": email_body, - "email_body_enable_html": email_body_enable_html, - "fallback_email_body": fallback_email_body, - "seed": seed, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( EmailFaceInpaintingPageStatusResponse, parse_obj_as( type_=EmailFaceInpaintingPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if 
_response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def image_from_web_search( - self, - *, - search_query: str, - text_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - prompt_strength: typing.Optional[float] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[GoogleImageGenPageOutput]: + async def post_v3google_image_gen_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> GoogleImageGenPageStatusResponse: """ Parameters ---------- - search_query : str - - text_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : 
typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - guidance_scale : typing.Optional[float] - - prompt_strength : typing.Optional[float] - - sd2upscaling : typing.Optional[bool] - - seed : typing.Optional[int] - - image_guidance_scale : typing.Optional[float] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[GoogleImageGenPageOutput] + GoogleImageGenPageStatusResponse Successful Response Examples @@ -8080,10 +2928,7 @@ async def image_from_web_search( async def main() -> None: - await client.image_from_web_search( - search_query="search_query", - text_prompt="text_prompt", - ) + await client.post_v3google_image_gen_async() asyncio.run(main()) @@ -8091,126 +2936,34 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/GoogleImageGen/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "search_query": search_query, - "text_prompt": text_prompt, - "selected_model": selected_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "sd_2_upscaling": sd2upscaling, - "seed": seed, - "image_guidance_scale": image_guidance_scale, - "settings": settings, - }, 
request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( GoogleImageGenPageStatusResponse, parse_obj_as( type_=GoogleImageGenPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def remove_background( - self, - *, - input_image: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT, - mask_threshold: typing.Optional[float] = OMIT, - rect_persepective_transform: typing.Optional[bool] = OMIT, - reflection_opacity: typing.Optional[float] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[ImageSegmentationPageOutput]: + async def post_v3image_segmentation_async( + 
self, *, request_options: typing.Optional[RequestOptions] = None + ) -> ImageSegmentationPageStatusResponse: """ Parameters ---------- - input_image : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel] - - mask_threshold : typing.Optional[float] - - rect_persepective_transform : typing.Optional[bool] - - reflection_opacity : typing.Optional[float] - - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[ImageSegmentationPageOutput] + ImageSegmentationPageStatusResponse Successful Response Examples @@ -8225,9 +2978,7 @@ async def remove_background( async def main() -> None: - await client.remove_background( - input_image="input_image", - ) + await client.post_v3image_segmentation_async() asyncio.run(main()) @@ -8235,115 +2986,34 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/ImageSegmentation/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "selected_model": selected_model, - "mask_threshold": mask_threshold, - "rect_persepective_transform": rect_persepective_transform, - "reflection_opacity": reflection_opacity, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( ImageSegmentationPageStatusResponse, 
parse_obj_as( type_=ImageSegmentationPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def upscale( - self, - *, - scale: int, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - input_image: typing.Optional[str] = OMIT, - input_video: typing.Optional[str] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT, - selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[CompareUpscalerPageOutput]: + async def post_v3compare_ai_upscalers_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> CompareUpscalerPageStatusResponse: """ Parameters ---------- - scale : int - The final upsampling scale of the image - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - 
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_image : typing.Optional[str] - Input Image - - input_video : typing.Optional[str] - Input Video - - selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] - - selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. Returns ------- - typing.Optional[CompareUpscalerPageOutput] + CompareUpscalerPageStatusResponse Successful Response Examples @@ -8358,9 +3028,7 @@ async def upscale( async def main() -> None: - await client.upscale( - scale=1, - ) + await client.post_v3compare_ai_upscalers_async() asyncio.run(main()) @@ -8368,100 +3036,34 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/compare-ai-upscalers/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_image": input_image, - "input_video": input_video, - "scale": scale, - "selected_models": selected_models, - "selected_bg_model": selected_bg_model, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( CompareUpscalerPageStatusResponse, parse_obj_as( type_=CompareUpscalerPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - 
type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def embed( - self, - *, - texts: typing.Sequence[str], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[EmbeddingsPageOutput]: + async def post_v3chyron_plant_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> ChyronPlantPageStatusResponse: """ Parameters ---------- - texts : typing.Sequence[str] - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] - - settings : typing.Optional[RunSettings] - request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[EmbeddingsPageOutput] + ChyronPlantPageStatusResponse Successful Response Examples @@ -8476,176 +3078,142 @@ async def embed( async def main() -> None: - await client.embed( - texts=["texts"], - ) + await client.post_v3chyron_plant_async() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/embeddings/async", + "v3/ChyronPlant/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "texts": texts, - "selected_model": selected_model, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( - EmbeddingsPageStatusResponse, + return typing.cast( + ChyronPlantPageStatusResponse, parse_obj_as( - type_=EmbeddingsPageStatusResponse, # type: ignore + type_=ChyronPlantPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def seo_people_also_ask_doc( - self, - *, - search_query: str, - example_id: typing.Optional[str] = None, - functions: 
typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT, - citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> typing.Optional[RelatedQnADocPageOutput]: + async def post_v3letter_writer_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> LetterWriterPageStatusResponse: """ Parameters ---------- - search_query : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as 
Jinja prompt templates and in functions as arguments - - keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] + request_options : typing.Optional[RequestOptions] + Request-specific configuration. - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + Returns + ------- + LetterWriterPageStatusResponse + Successful Response + Examples + -------- + import asyncio - task_instructions : typing.Optional[str] + from gooey import AsyncGooey - query_instructions : typing.Optional[str] + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) - selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel] - citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] + async def main() -> None: + await client.post_v3letter_writer_async() - avoid_repetition : typing.Optional[bool] - num_outputs : typing.Optional[int] + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/LetterWriter/async", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + LetterWriterPageStatusResponse, + parse_obj_as( + type_=LetterWriterPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) - quality : 
typing.Optional[float] + async def post_v3embeddings_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> EmbeddingsPageStatusResponse: + """ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. - max_tokens : typing.Optional[int] + Returns + ------- + EmbeddingsPageStatusResponse + Successful Response - sampling_temperature : typing.Optional[float] + Examples + -------- + import asyncio - response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType] + from gooey import AsyncGooey - serp_search_location : typing.Optional[SerpSearchLocation] + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - serp_search_type : typing.Optional[SerpSearchType] + async def main() -> None: + await client.post_v3embeddings_async() - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead - settings : typing.Optional[RunSettings] + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/embeddings/async", + method="POST", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EmbeddingsPageStatusResponse, + parse_obj_as( + type_=EmbeddingsPageStatusResponse, # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + async def post_v3related_qna_maker_doc_async( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> RelatedQnADocPageStatusResponse: + """ + Parameters + ---------- request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[RelatedQnADocPageOutput] + RelatedQnADocPageStatusResponse Successful Response Examples @@ -8660,9 +3228,7 @@ async def seo_people_also_ask_doc( async def main() -> None: - await client.seo_people_also_ask_doc( - search_query="search_query", - ) + await client.post_v3related_qna_maker_doc_async() asyncio.run(main()) @@ -8670,80 +3236,17 @@ async def main() -> None: _response = await self._client_wrapper.httpx_client.request( "v3/related-qna-maker-doc/async", method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "citation_style": citation_style, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, - "settings": settings, - }, request_options=request_options, - omit=OMIT, ) try: if 200 <= _response.status_code < 300: - _parsed_response = typing.cast( + return typing.cast( RelatedQnADocPageStatusResponse, parse_obj_as( type_=RelatedQnADocPageStatusResponse, # type: ignore object_=_response.json(), ), ) - return _parsed_response.output - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: 
ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) diff --git a/src/gooey/copilot_for_your_enterprise/__init__.py b/src/gooey/copilot_for_your_enterprise/__init__.py deleted file mode 100644 index f1637db..0000000 --- a/src/gooey/copilot_for_your_enterprise/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from .types import ( - VideoBotsPageRequestAsrModel, - VideoBotsPageRequestCitationStyle, - VideoBotsPageRequestEmbeddingModel, - VideoBotsPageRequestLipsyncModel, - VideoBotsPageRequestOpenaiTtsModel, - VideoBotsPageRequestOpenaiVoiceName, - VideoBotsPageRequestResponseFormatType, - VideoBotsPageRequestSelectedModel, - VideoBotsPageRequestTranslationModel, - VideoBotsPageRequestTtsProvider, -) - -__all__ = [ - "VideoBotsPageRequestAsrModel", - "VideoBotsPageRequestCitationStyle", - "VideoBotsPageRequestEmbeddingModel", - "VideoBotsPageRequestLipsyncModel", - "VideoBotsPageRequestOpenaiTtsModel", - "VideoBotsPageRequestOpenaiVoiceName", - "VideoBotsPageRequestResponseFormatType", - "VideoBotsPageRequestSelectedModel", - "VideoBotsPageRequestTranslationModel", - "VideoBotsPageRequestTtsProvider", -] diff --git a/src/gooey/copilot_for_your_enterprise/client.py b/src/gooey/copilot_for_your_enterprise/client.py deleted file mode 100644 index 5668f9f..0000000 --- a/src/gooey/copilot_for_your_enterprise/client.py +++ /dev/null @@ -1,741 +0,0 @@ -# This file was auto-generated by 
Fern from our API Definition. - -import typing -from ..core.client_wrapper import SyncClientWrapper -from ..types.recipe_function import RecipeFunction -from ..types.conversation_entry import ConversationEntry -from .types.video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel -from .types.video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel -from .types.video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle -from .types.video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel -from .types.video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel -from .types.video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel -from ..types.llm_tools import LlmTools -from .types.video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType -from .types.video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider -from .types.video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName -from .types.video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel -from ..types.sad_talker_settings import SadTalkerSettings -from ..types.run_settings import RunSettings -from ..core.request_options import RequestOptions -from ..types.video_bots_page_status_response import VideoBotsPageStatusResponse -from ..core.pydantic_utilities import parse_obj_as -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.http_validation_error import HttpValidationError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..types.generic_error_response import GenericErrorResponse -from json.decoder import JSONDecodeError -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper - -# this is used as the default value for 
optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class CopilotForYourEnterpriseClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def async_video_bots( - self, - *, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - input_audio: typing.Optional[str] = OMIT, - input_images: typing.Optional[typing.Sequence[str]] = OMIT, - input_documents: typing.Optional[typing.Sequence[str]] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT, - bot_script: typing.Optional[str] = OMIT, - selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = OMIT, - document_model: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - keyword_instructions: typing.Optional[str] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = OMIT, - use_url_shortener: typing.Optional[bool] = OMIT, - asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = OMIT, - asr_language: typing.Optional[str] = OMIT, - translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = OMIT, - user_language: typing.Optional[str] = OMIT, - input_glossary_document: typing.Optional[str] = OMIT, - output_glossary_document: typing.Optional[str] = OMIT, - lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = OMIT, - tools: 
typing.Optional[typing.Sequence[LlmTools]] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = OMIT, - tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = OMIT, - input_face: typing.Optional[str] = OMIT, - face_padding_top: typing.Optional[int] = OMIT, - face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> VideoBotsPageStatusResponse: - """ - Parameters - ---------- - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - 
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - input_audio : typing.Optional[str] - - input_images : typing.Optional[typing.Sequence[str]] - - input_documents : typing.Optional[typing.Sequence[str]] - - doc_extract_url : typing.Optional[str] - Select a workflow to extract text from documents and images. - - messages : typing.Optional[typing.Sequence[ConversationEntry]] - - bot_script : typing.Optional[str] - - selected_model : typing.Optional[VideoBotsPageRequestSelectedModel] - - document_model : typing.Optional[str] - When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - keyword_instructions : typing.Optional[str] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[VideoBotsPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - - citation_style : typing.Optional[VideoBotsPageRequestCitationStyle] - - use_url_shortener : typing.Optional[bool] - - asr_model : typing.Optional[VideoBotsPageRequestAsrModel] - Choose a model to transcribe incoming audio messages to text. - - asr_language : typing.Optional[str] - Choose a language to transcribe incoming audio messages to text. 
- - translation_model : typing.Optional[VideoBotsPageRequestTranslationModel] - - user_language : typing.Optional[str] - Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. - - input_glossary_document : typing.Optional[str] - - Translation Glossary for User Langauge -> LLM Language (English) - - - output_glossary_document : typing.Optional[str] - - Translation Glossary for LLM Language (English) -> User Langauge - - - lipsync_model : typing.Optional[VideoBotsPageRequestLipsyncModel] - - tools : typing.Optional[typing.Sequence[LlmTools]] - Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[VideoBotsPageRequestResponseFormatType] - - tts_provider : typing.Optional[VideoBotsPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[VideoBotsPageRequestOpenaiVoiceName] - - 
openai_tts_model : typing.Optional[VideoBotsPageRequestOpenaiTtsModel] - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - VideoBotsPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.copilot_for_your_enterprise.async_video_bots() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/video-bots/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "input_audio": input_audio, - "input_images": input_images, - "input_documents": input_documents, - "doc_extract_url": doc_extract_url, - "messages": messages, - "bot_script": bot_script, - "selected_model": selected_model, - "document_model": document_model, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "keyword_instructions": keyword_instructions, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "citation_style": citation_style, - "use_url_shortener": use_url_shortener, - "asr_model": asr_model, - "asr_language": asr_language, - "translation_model": translation_model, - "user_language": user_language, - "input_glossary_document": input_glossary_document, - "output_glossary_document": output_glossary_document, - "lipsync_model": lipsync_model, - "tools": tools, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, 
- "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - VideoBotsPageStatusResponse, - parse_obj_as( - type_=VideoBotsPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - 
GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncCopilotForYourEnterpriseClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def async_video_bots( - self, - *, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - input_audio: typing.Optional[str] = OMIT, - input_images: typing.Optional[typing.Sequence[str]] = OMIT, - input_documents: typing.Optional[typing.Sequence[str]] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT, - bot_script: typing.Optional[str] = OMIT, - selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = OMIT, - document_model: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - keyword_instructions: typing.Optional[str] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = OMIT, - use_url_shortener: typing.Optional[bool] = OMIT, - asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = OMIT, - asr_language: typing.Optional[str] = OMIT, - translation_model: 
typing.Optional[VideoBotsPageRequestTranslationModel] = OMIT, - user_language: typing.Optional[str] = OMIT, - input_glossary_document: typing.Optional[str] = OMIT, - output_glossary_document: typing.Optional[str] = OMIT, - lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = OMIT, - tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = OMIT, - tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = OMIT, - input_face: typing.Optional[str] = OMIT, - face_padding_top: typing.Optional[int] = OMIT, - face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: 
typing.Optional[SadTalkerSettings] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> VideoBotsPageStatusResponse: - """ - Parameters - ---------- - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - input_audio : typing.Optional[str] - - input_images : typing.Optional[typing.Sequence[str]] - - input_documents : typing.Optional[typing.Sequence[str]] - - doc_extract_url : typing.Optional[str] - Select a workflow to extract text from documents and images. - - messages : typing.Optional[typing.Sequence[ConversationEntry]] - - bot_script : typing.Optional[str] - - selected_model : typing.Optional[VideoBotsPageRequestSelectedModel] - - document_model : typing.Optional[str] - When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - keyword_instructions : typing.Optional[str] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[VideoBotsPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
- - - citation_style : typing.Optional[VideoBotsPageRequestCitationStyle] - - use_url_shortener : typing.Optional[bool] - - asr_model : typing.Optional[VideoBotsPageRequestAsrModel] - Choose a model to transcribe incoming audio messages to text. - - asr_language : typing.Optional[str] - Choose a language to transcribe incoming audio messages to text. - - translation_model : typing.Optional[VideoBotsPageRequestTranslationModel] - - user_language : typing.Optional[str] - Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. - - input_glossary_document : typing.Optional[str] - - Translation Glossary for User Langauge -> LLM Language (English) - - - output_glossary_document : typing.Optional[str] - - Translation Glossary for LLM Language (English) -> User Langauge - - - lipsync_model : typing.Optional[VideoBotsPageRequestLipsyncModel] - - tools : typing.Optional[typing.Sequence[LlmTools]] - Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). 
- - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[VideoBotsPageRequestResponseFormatType] - - tts_provider : typing.Optional[VideoBotsPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[VideoBotsPageRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[VideoBotsPageRequestOpenaiTtsModel] - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - VideoBotsPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.copilot_for_your_enterprise.async_video_bots() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/video-bots/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "input_audio": input_audio, - "input_images": input_images, - "input_documents": input_documents, - "doc_extract_url": doc_extract_url, - "messages": messages, - "bot_script": bot_script, - "selected_model": selected_model, - "document_model": document_model, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "keyword_instructions": keyword_instructions, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "citation_style": citation_style, - "use_url_shortener": use_url_shortener, - "asr_model": asr_model, - "asr_language": asr_language, - "translation_model": translation_model, - "user_language": user_language, - "input_glossary_document": input_glossary_document, - "output_glossary_document": output_glossary_document, - "lipsync_model": lipsync_model, - "tools": tools, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": 
google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - VideoBotsPageStatusResponse, - parse_obj_as( - type_=VideoBotsPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git 
a/src/gooey/copilot_for_your_enterprise/types/__init__.py b/src/gooey/copilot_for_your_enterprise/types/__init__.py deleted file mode 100644 index dd7ed8b..0000000 --- a/src/gooey/copilot_for_your_enterprise/types/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel -from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle -from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel -from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel -from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel -from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName -from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType -from .video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel -from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel -from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider - -__all__ = [ - "VideoBotsPageRequestAsrModel", - "VideoBotsPageRequestCitationStyle", - "VideoBotsPageRequestEmbeddingModel", - "VideoBotsPageRequestLipsyncModel", - "VideoBotsPageRequestOpenaiTtsModel", - "VideoBotsPageRequestOpenaiVoiceName", - "VideoBotsPageRequestResponseFormatType", - "VideoBotsPageRequestSelectedModel", - "VideoBotsPageRequestTranslationModel", - "VideoBotsPageRequestTtsProvider", -] diff --git a/src/gooey/core/client_wrapper.py b/src/gooey/core/client_wrapper.py index 97888d8..dfbc8e1 100644 --- a/src/gooey/core/client_wrapper.py +++ b/src/gooey/core/client_wrapper.py @@ -22,7 +22,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "gooeyai", - "X-Fern-SDK-Version": 
"0.0.1-beta9", + "X-Fern-SDK-Version": "0.0.1-beta10", } headers["Authorization"] = f"Bearer {self._get_api_key()}" return headers diff --git a/src/gooey/errors/__init__.py b/src/gooey/errors/__init__.py index 19ea9c4..5d9271d 100644 --- a/src/gooey/errors/__init__.py +++ b/src/gooey/errors/__init__.py @@ -1,7 +1,6 @@ # This file was auto-generated by Fern from our API Definition. from .payment_required_error import PaymentRequiredError -from .too_many_requests_error import TooManyRequestsError from .unprocessable_entity_error import UnprocessableEntityError -__all__ = ["PaymentRequiredError", "TooManyRequestsError", "UnprocessableEntityError"] +__all__ = ["PaymentRequiredError", "UnprocessableEntityError"] diff --git a/src/gooey/errors/too_many_requests_error.py b/src/gooey/errors/too_many_requests_error.py deleted file mode 100644 index 81d358c..0000000 --- a/src/gooey/errors/too_many_requests_error.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from ..core.api_error import ApiError -from ..types.generic_error_response import GenericErrorResponse - - -class TooManyRequestsError(ApiError): - def __init__(self, body: GenericErrorResponse): - super().__init__(status_code=429, body=body) diff --git a/src/gooey/evaluator/__init__.py b/src/gooey/evaluator/__init__.py deleted file mode 100644 index 7ceefb0..0000000 --- a/src/gooey/evaluator/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from .types import BulkEvalPageRequestResponseFormatType, BulkEvalPageRequestSelectedModel - -__all__ = ["BulkEvalPageRequestResponseFormatType", "BulkEvalPageRequestSelectedModel"] diff --git a/src/gooey/evaluator/client.py b/src/gooey/evaluator/client.py deleted file mode 100644 index e3734ec..0000000 --- a/src/gooey/evaluator/client.py +++ /dev/null @@ -1,342 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing -from ..core.client_wrapper import SyncClientWrapper -from ..types.recipe_function import RecipeFunction -from ..types.eval_prompt import EvalPrompt -from ..types.agg_function import AggFunction -from .types.bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel -from .types.bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType -from ..types.run_settings import RunSettings -from ..core.request_options import RequestOptions -from ..types.bulk_eval_page_status_response import BulkEvalPageStatusResponse -from ..core.pydantic_utilities import parse_obj_as -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.http_validation_error import HttpValidationError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..types.generic_error_response import GenericErrorResponse -from json.decoder import JSONDecodeError -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class EvaluatorClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def async_bulk_eval( - self, - *, - documents: typing.Sequence[str], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - eval_prompts: typing.Optional[typing.Sequence[EvalPrompt]] = OMIT, - agg_functions: typing.Optional[typing.Sequence[AggFunction]] = OMIT, - selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> BulkEvalPageStatusResponse: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. - - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - eval_prompts : typing.Optional[typing.Sequence[EvalPrompt]] - - Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. 
- _The `columns` dictionary can be used to reference the spreadsheet columns._ - - - agg_functions : typing.Optional[typing.Sequence[AggFunction]] - - Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). - - - selected_model : typing.Optional[BulkEvalPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - BulkEvalPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.evaluator.async_bulk_eval( - documents=["documents"], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/bulk-eval/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "eval_prompts": eval_prompts, - "agg_functions": agg_functions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - BulkEvalPageStatusResponse, - parse_obj_as( - type_=BulkEvalPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - 
typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncEvaluatorClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def async_bulk_eval( - self, - *, - documents: typing.Sequence[str], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - eval_prompts: typing.Optional[typing.Sequence[EvalPrompt]] = OMIT, - agg_functions: typing.Optional[typing.Sequence[AggFunction]] = OMIT, - selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> BulkEvalPageStatusResponse: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - Upload or link to a CSV or google sheet that contains your sample input data. 
- For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. - Remember to includes header names in your CSV too. - - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - eval_prompts : typing.Optional[typing.Sequence[EvalPrompt]] - - Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. - _The `columns` dictionary can be used to reference the spreadsheet columns._ - - - agg_functions : typing.Optional[typing.Sequence[AggFunction]] - - Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). - - - selected_model : typing.Optional[BulkEvalPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - BulkEvalPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.evaluator.async_bulk_eval( - documents=["documents"], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-eval/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "eval_prompts": eval_prompts, - "agg_functions": agg_functions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - BulkEvalPageStatusResponse, - parse_obj_as( - type_=BulkEvalPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, 
body=_response_json) diff --git a/src/gooey/evaluator/types/__init__.py b/src/gooey/evaluator/types/__init__.py deleted file mode 100644 index 67f1384..0000000 --- a/src/gooey/evaluator/types/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType -from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel - -__all__ = ["BulkEvalPageRequestResponseFormatType", "BulkEvalPageRequestSelectedModel"] diff --git a/src/gooey/functions/__init__.py b/src/gooey/functions/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/functions/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - diff --git a/src/gooey/functions/client.py b/src/gooey/functions/client.py deleted file mode 100644 index 0479229..0000000 --- a/src/gooey/functions/client.py +++ /dev/null @@ -1,231 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from ..core.client_wrapper import SyncClientWrapper -from ..types.run_settings import RunSettings -from ..core.request_options import RequestOptions -from ..types.functions_page_status_response import FunctionsPageStatusResponse -from ..core.pydantic_utilities import parse_obj_as -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.http_validation_error import HttpValidationError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..types.generic_error_response import GenericErrorResponse -from json.decoder import JSONDecodeError -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class FunctionsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def async_functions( - self, - *, - example_id: typing.Optional[str] = None, - code: typing.Optional[str] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> FunctionsPageStatusResponse: - """ - Parameters - ---------- - example_id : typing.Optional[str] - - code : typing.Optional[str] - The JS code to be executed. - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used in the code - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - FunctionsPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.functions.async_functions() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/functions/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "code": code, - "variables": variables, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FunctionsPageStatusResponse, - parse_obj_as( - type_=FunctionsPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if 
_response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncFunctionsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def async_functions( - self, - *, - example_id: typing.Optional[str] = None, - code: typing.Optional[str] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> FunctionsPageStatusResponse: - """ - Parameters - ---------- - example_id : typing.Optional[str] - - code : typing.Optional[str] - The JS code to be executed. - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used in the code - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - FunctionsPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.functions.async_functions() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/functions/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "code": code, - "variables": variables, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FunctionsPageStatusResponse, - parse_obj_as( - type_=FunctionsPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/lip_syncing/__init__.py b/src/gooey/lip_syncing/__init__.py deleted file mode 100644 index 4d094b1..0000000 --- a/src/gooey/lip_syncing/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from .types import LipsyncPageRequestSelectedModel - -__all__ = ["LipsyncPageRequestSelectedModel"] diff --git a/src/gooey/lip_syncing/client.py b/src/gooey/lip_syncing/client.py deleted file mode 100644 index 1ece28c..0000000 --- a/src/gooey/lip_syncing/client.py +++ /dev/null @@ -1,296 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from ..core.client_wrapper import SyncClientWrapper -from ..types.recipe_function import RecipeFunction -from ..types.sad_talker_settings import SadTalkerSettings -from .types.lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel -from ..types.run_settings import RunSettings -from ..core.request_options import RequestOptions -from ..types.lipsync_page_status_response import LipsyncPageStatusResponse -from ..core.pydantic_utilities import parse_obj_as -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.http_validation_error import HttpValidationError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..types.generic_error_response import GenericErrorResponse -from json.decoder import JSONDecodeError -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class LipSyncingClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def async_lipsync( - self, - *, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - input_face: typing.Optional[str] = OMIT, - face_padding_top: typing.Optional[int] = OMIT, - face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, - selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = OMIT, - input_audio: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> LipsyncPageStatusResponse: - """ - Parameters - ---------- - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - selected_model : typing.Optional[LipsyncPageRequestSelectedModel] - - input_audio : typing.Optional[str] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - LipsyncPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.lip_syncing.async_lipsync() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/Lipsync/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "selected_model": selected_model, - "input_audio": input_audio, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - LipsyncPageStatusResponse, - parse_obj_as( - type_=LipsyncPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncLipSyncingClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def 
async_lipsync( - self, - *, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - input_face: typing.Optional[str] = OMIT, - face_padding_top: typing.Optional[int] = OMIT, - face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, - selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = OMIT, - input_audio: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> LipsyncPageStatusResponse: - """ - Parameters - ---------- - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - selected_model : typing.Optional[LipsyncPageRequestSelectedModel] - - input_audio : typing.Optional[str] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - LipsyncPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.lip_syncing.async_lipsync() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/Lipsync/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "selected_model": selected_model, - "input_audio": input_audio, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - LipsyncPageStatusResponse, - parse_obj_as( - type_=LipsyncPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git 
a/src/gooey/lip_syncing/types/__init__.py b/src/gooey/lip_syncing/types/__init__.py deleted file mode 100644 index e7e3b85..0000000 --- a/src/gooey/lip_syncing/types/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel - -__all__ = ["LipsyncPageRequestSelectedModel"] diff --git a/src/gooey/smart_gpt/__init__.py b/src/gooey/smart_gpt/__init__.py deleted file mode 100644 index fce5f3e..0000000 --- a/src/gooey/smart_gpt/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from .types import SmartGptPageRequestResponseFormatType, SmartGptPageRequestSelectedModel - -__all__ = ["SmartGptPageRequestResponseFormatType", "SmartGptPageRequestSelectedModel"] diff --git a/src/gooey/smart_gpt/client.py b/src/gooey/smart_gpt/client.py deleted file mode 100644 index bad19d2..0000000 --- a/src/gooey/smart_gpt/client.py +++ /dev/null @@ -1,324 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing -from ..core.client_wrapper import SyncClientWrapper -from ..types.recipe_function import RecipeFunction -from .types.smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel -from .types.smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType -from ..types.run_settings import RunSettings -from ..core.request_options import RequestOptions -from ..types.smart_gpt_page_status_response import SmartGptPageStatusResponse -from ..core.pydantic_utilities import parse_obj_as -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.http_validation_error import HttpValidationError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..types.generic_error_response import GenericErrorResponse -from json.decoder import JSONDecodeError -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
- - -class SmartGptClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def async_smart_gpt( - self, - *, - input_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - cot_prompt: typing.Optional[str] = OMIT, - reflexion_prompt: typing.Optional[str] = OMIT, - dera_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> SmartGptPageStatusResponse: - """ - Parameters - ---------- - input_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - cot_prompt : typing.Optional[str] - - reflexion_prompt : typing.Optional[str] - - dera_prompt : typing.Optional[str] - - selected_model : typing.Optional[SmartGptPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SmartGptPageStatusResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.smart_gpt.async_smart_gpt( - input_prompt="input_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/SmartGPT/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "cot_prompt": cot_prompt, - "reflexion_prompt": reflexion_prompt, - "dera_prompt": dera_prompt, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - SmartGptPageStatusResponse, - parse_obj_as( - type_=SmartGptPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncSmartGptClient: - def 
__init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def async_smart_gpt( - self, - *, - input_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - cot_prompt: typing.Optional[str] = OMIT, - reflexion_prompt: typing.Optional[str] = OMIT, - dera_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> SmartGptPageStatusResponse: - """ - Parameters - ---------- - input_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - cot_prompt : typing.Optional[str] - - reflexion_prompt : typing.Optional[str] - - dera_prompt : typing.Optional[str] - - selected_model : typing.Optional[SmartGptPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SmartGptPageStatusResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.smart_gpt.async_smart_gpt( - input_prompt="input_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/SmartGPT/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "cot_prompt": cot_prompt, - "reflexion_prompt": reflexion_prompt, - "dera_prompt": dera_prompt, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - SmartGptPageStatusResponse, - parse_obj_as( - type_=SmartGptPageStatusResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise 
ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/smart_gpt/types/__init__.py b/src/gooey/smart_gpt/types/__init__.py deleted file mode 100644 index 3032d41..0000000 --- a/src/gooey/smart_gpt/types/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType -from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel - -__all__ = ["SmartGptPageRequestResponseFormatType", "SmartGptPageRequestSelectedModel"] diff --git a/src/gooey/types/__init__.py b/src/gooey/types/__init__.py index 83278eb..2f81897 100644 --- a/src/gooey/types/__init__.py +++ b/src/gooey/types/__init__.py @@ -9,6 +9,9 @@ from .asr_output_json import AsrOutputJson from .asr_page_output import AsrPageOutput from .asr_page_output_output_text_item import AsrPageOutputOutputTextItem +from .asr_page_request import AsrPageRequest +from .asr_page_request_functions_item import AsrPageRequestFunctionsItem +from .asr_page_request_functions_item_trigger import AsrPageRequestFunctionsItemTrigger from .asr_page_request_output_format import AsrPageRequestOutputFormat from .asr_page_request_selected_model import AsrPageRequestSelectedModel from .asr_page_request_translation_model import AsrPageRequestTranslationModel @@ -17,8 +20,19 @@ from .balance_response import BalanceResponse from .bot_broadcast_filters import BotBroadcastFilters from .bulk_eval_page_output import BulkEvalPageOutput +from .bulk_eval_page_request import BulkEvalPageRequest +from .bulk_eval_page_request_agg_functions_item import BulkEvalPageRequestAggFunctionsItem +from .bulk_eval_page_request_agg_functions_item_function import BulkEvalPageRequestAggFunctionsItemFunction +from .bulk_eval_page_request_eval_prompts_item import BulkEvalPageRequestEvalPromptsItem +from .bulk_eval_page_request_functions_item import 
BulkEvalPageRequestFunctionsItem +from .bulk_eval_page_request_functions_item_trigger import BulkEvalPageRequestFunctionsItemTrigger +from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType +from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel from .bulk_eval_page_status_response import BulkEvalPageStatusResponse from .bulk_runner_page_output import BulkRunnerPageOutput +from .bulk_runner_page_request import BulkRunnerPageRequest +from .bulk_runner_page_request_functions_item import BulkRunnerPageRequestFunctionsItem +from .bulk_runner_page_request_functions_item_trigger import BulkRunnerPageRequestFunctionsItemTrigger from .bulk_runner_page_status_response import BulkRunnerPageStatusResponse from .button_pressed import ButtonPressed from .called_function_response import CalledFunctionResponse @@ -27,16 +41,27 @@ from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam from .chyron_plant_page_output import ChyronPlantPageOutput from .chyron_plant_page_request import ChyronPlantPageRequest +from .chyron_plant_page_request_functions_item import ChyronPlantPageRequestFunctionsItem +from .chyron_plant_page_request_functions_item_trigger import ChyronPlantPageRequestFunctionsItemTrigger from .chyron_plant_page_status_response import ChyronPlantPageStatusResponse from .compare_llm_page_output import CompareLlmPageOutput +from .compare_llm_page_request import CompareLlmPageRequest +from .compare_llm_page_request_functions_item import CompareLlmPageRequestFunctionsItem +from .compare_llm_page_request_functions_item_trigger import CompareLlmPageRequestFunctionsItemTrigger from .compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType from .compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem from .compare_llm_page_status_response import CompareLlmPageStatusResponse from .compare_text2img_page_output 
import CompareText2ImgPageOutput +from .compare_text2img_page_request import CompareText2ImgPageRequest +from .compare_text2img_page_request_functions_item import CompareText2ImgPageRequestFunctionsItem +from .compare_text2img_page_request_functions_item_trigger import CompareText2ImgPageRequestFunctionsItemTrigger from .compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler from .compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem from .compare_text2img_page_status_response import CompareText2ImgPageStatusResponse from .compare_upscaler_page_output import CompareUpscalerPageOutput +from .compare_upscaler_page_request import CompareUpscalerPageRequest +from .compare_upscaler_page_request_functions_item import CompareUpscalerPageRequestFunctionsItem +from .compare_upscaler_page_request_functions_item_trigger import CompareUpscalerPageRequestFunctionsItemTrigger from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem from .compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse from .console_logs import ConsoleLogs @@ -52,81 +77,139 @@ from .conversation_start import ConversationStart from .create_stream_response import CreateStreamResponse from .deforum_sd_page_output import DeforumSdPageOutput +from .deforum_sd_page_request import DeforumSdPageRequest +from .deforum_sd_page_request_animation_prompts_item import DeforumSdPageRequestAnimationPromptsItem +from .deforum_sd_page_request_functions_item import DeforumSdPageRequestFunctionsItem +from .deforum_sd_page_request_functions_item_trigger import DeforumSdPageRequestFunctionsItemTrigger from .deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel from .deforum_sd_page_status_response import DeforumSdPageStatusResponse from .doc_extract_page_output import DocExtractPageOutput +from .doc_extract_page_request import DocExtractPageRequest 
+from .doc_extract_page_request_functions_item import DocExtractPageRequestFunctionsItem +from .doc_extract_page_request_functions_item_trigger import DocExtractPageRequestFunctionsItemTrigger from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel from .doc_extract_page_request_selected_model import DocExtractPageRequestSelectedModel from .doc_extract_page_status_response import DocExtractPageStatusResponse from .doc_search_page_output import DocSearchPageOutput +from .doc_search_page_request import DocSearchPageRequest from .doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle from .doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel +from .doc_search_page_request_functions_item import DocSearchPageRequestFunctionsItem +from .doc_search_page_request_functions_item_trigger import DocSearchPageRequestFunctionsItemTrigger from .doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery from .doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType from .doc_search_page_request_selected_model import DocSearchPageRequestSelectedModel from .doc_search_page_status_response import DocSearchPageStatusResponse from .doc_summary_page_output import DocSummaryPageOutput +from .doc_summary_page_request import DocSummaryPageRequest +from .doc_summary_page_request_functions_item import DocSummaryPageRequestFunctionsItem +from .doc_summary_page_request_functions_item_trigger import DocSummaryPageRequestFunctionsItemTrigger from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel from .doc_summary_page_request_selected_model import DocSummaryPageRequestSelectedModel from 
.doc_summary_page_status_response import DocSummaryPageStatusResponse from .email_face_inpainting_page_output import EmailFaceInpaintingPageOutput +from .email_face_inpainting_page_request import EmailFaceInpaintingPageRequest +from .email_face_inpainting_page_request_functions_item import EmailFaceInpaintingPageRequestFunctionsItem +from .email_face_inpainting_page_request_functions_item_trigger import ( + EmailFaceInpaintingPageRequestFunctionsItemTrigger, +) from .email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel from .email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse from .embeddings_page_output import EmbeddingsPageOutput +from .embeddings_page_request import EmbeddingsPageRequest +from .embeddings_page_request_functions_item import EmbeddingsPageRequestFunctionsItem +from .embeddings_page_request_functions_item_trigger import EmbeddingsPageRequestFunctionsItemTrigger from .embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel from .embeddings_page_status_response import EmbeddingsPageStatusResponse from .eval_prompt import EvalPrompt from .face_inpainting_page_output import FaceInpaintingPageOutput +from .face_inpainting_page_request import FaceInpaintingPageRequest +from .face_inpainting_page_request_functions_item import FaceInpaintingPageRequestFunctionsItem +from .face_inpainting_page_request_functions_item_trigger import FaceInpaintingPageRequestFunctionsItemTrigger from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel from .face_inpainting_page_status_response import FaceInpaintingPageStatusResponse from .final_response import FinalResponse from .functions_page_output import FunctionsPageOutput +from .functions_page_request import FunctionsPageRequest from .functions_page_status_response import FunctionsPageStatusResponse from .generic_error_response import GenericErrorResponse from 
.generic_error_response_detail import GenericErrorResponseDetail from .google_gpt_page_output import GoogleGptPageOutput +from .google_gpt_page_request import GoogleGptPageRequest from .google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel +from .google_gpt_page_request_functions_item import GoogleGptPageRequestFunctionsItem +from .google_gpt_page_request_functions_item_trigger import GoogleGptPageRequestFunctionsItemTrigger from .google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType from .google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel from .google_gpt_page_status_response import GoogleGptPageStatusResponse from .google_image_gen_page_output import GoogleImageGenPageOutput +from .google_image_gen_page_request import GoogleImageGenPageRequest +from .google_image_gen_page_request_functions_item import GoogleImageGenPageRequestFunctionsItem +from .google_image_gen_page_request_functions_item_trigger import GoogleImageGenPageRequestFunctionsItemTrigger from .google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel from .google_image_gen_page_status_response import GoogleImageGenPageStatusResponse from .http_validation_error import HttpValidationError from .image_segmentation_page_output import ImageSegmentationPageOutput +from .image_segmentation_page_request import ImageSegmentationPageRequest +from .image_segmentation_page_request_functions_item import ImageSegmentationPageRequestFunctionsItem +from .image_segmentation_page_request_functions_item_trigger import ImageSegmentationPageRequestFunctionsItemTrigger from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel from .image_segmentation_page_status_response import ImageSegmentationPageStatusResponse from .image_url import ImageUrl from .image_url_detail import ImageUrlDetail from .img2img_page_output import Img2ImgPageOutput +from 
.img2img_page_request import Img2ImgPageRequest +from .img2img_page_request_functions_item import Img2ImgPageRequestFunctionsItem +from .img2img_page_request_functions_item_trigger import Img2ImgPageRequestFunctionsItemTrigger from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel from .img2img_page_request_selected_controlnet_model_item import Img2ImgPageRequestSelectedControlnetModelItem from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel from .img2img_page_status_response import Img2ImgPageStatusResponse from .letter_writer_page_output import LetterWriterPageOutput from .letter_writer_page_request import LetterWriterPageRequest +from .letter_writer_page_request_example_letters_item import LetterWriterPageRequestExampleLettersItem +from .letter_writer_page_request_functions_item import LetterWriterPageRequestFunctionsItem +from .letter_writer_page_request_functions_item_trigger import LetterWriterPageRequestFunctionsItemTrigger from .letter_writer_page_status_response import LetterWriterPageStatusResponse from .lipsync_page_output import LipsyncPageOutput +from .lipsync_page_request import LipsyncPageRequest +from .lipsync_page_request_functions_item import LipsyncPageRequestFunctionsItem +from .lipsync_page_request_functions_item_trigger import LipsyncPageRequestFunctionsItemTrigger +from .lipsync_page_request_sadtalker_settings import LipsyncPageRequestSadtalkerSettings +from .lipsync_page_request_sadtalker_settings_preprocess import LipsyncPageRequestSadtalkerSettingsPreprocess +from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel from .lipsync_page_status_response import LipsyncPageStatusResponse from .lipsync_tts_page_output import LipsyncTtsPageOutput +from .lipsync_tts_page_request import LipsyncTtsPageRequest +from .lipsync_tts_page_request_functions_item import LipsyncTtsPageRequestFunctionsItem +from .lipsync_tts_page_request_functions_item_trigger 
import LipsyncTtsPageRequestFunctionsItemTrigger from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName +from .lipsync_tts_page_request_sadtalker_settings import LipsyncTtsPageRequestSadtalkerSettings +from .lipsync_tts_page_request_sadtalker_settings_preprocess import LipsyncTtsPageRequestSadtalkerSettingsPreprocess from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider from .lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse from .llm_tools import LlmTools from .message_part import MessagePart from .object_inpainting_page_output import ObjectInpaintingPageOutput +from .object_inpainting_page_request import ObjectInpaintingPageRequest +from .object_inpainting_page_request_functions_item import ObjectInpaintingPageRequestFunctionsItem +from .object_inpainting_page_request_functions_item_trigger import ObjectInpaintingPageRequestFunctionsItemTrigger from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel from .object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse from .prompt_tree_node import PromptTreeNode from .prompt_tree_node_prompt import PromptTreeNodePrompt from .qr_code_generator_page_output import QrCodeGeneratorPageOutput +from .qr_code_generator_page_request import QrCodeGeneratorPageRequest +from .qr_code_generator_page_request_functions_item import QrCodeGeneratorPageRequestFunctionsItem +from .qr_code_generator_page_request_functions_item_trigger import QrCodeGeneratorPageRequestFunctionsItemTrigger from .qr_code_generator_page_request_image_prompt_controlnet_models_item import ( QrCodeGeneratorPageRequestImagePromptControlnetModelsItem, ) +from .qr_code_generator_page_request_qr_code_vcard import 
QrCodeGeneratorPageRequestQrCodeVcard from .qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler from .qr_code_generator_page_request_selected_controlnet_model_item import ( QrCodeGeneratorPageRequestSelectedControlnetModelItem, @@ -139,14 +222,20 @@ from .related_doc_search_response import RelatedDocSearchResponse from .related_google_gpt_response import RelatedGoogleGptResponse from .related_qn_a_doc_page_output import RelatedQnADocPageOutput +from .related_qn_a_doc_page_request import RelatedQnADocPageRequest from .related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle from .related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel +from .related_qn_a_doc_page_request_functions_item import RelatedQnADocPageRequestFunctionsItem +from .related_qn_a_doc_page_request_functions_item_trigger import RelatedQnADocPageRequestFunctionsItemTrigger from .related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery from .related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType from .related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel from .related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse from .related_qn_a_page_output import RelatedQnAPageOutput +from .related_qn_a_page_request import RelatedQnAPageRequest from .related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel +from .related_qn_a_page_request_functions_item import RelatedQnAPageRequestFunctionsItem +from .related_qn_a_page_request_functions_item_trigger import RelatedQnAPageRequestFunctionsItemTrigger from .related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType from .related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel from .related_qn_a_page_status_response import 
RelatedQnAPageStatusResponse @@ -161,27 +250,45 @@ from .sad_talker_settings_preprocess import SadTalkerSettingsPreprocess from .search_reference import SearchReference from .seo_summary_page_output import SeoSummaryPageOutput +from .seo_summary_page_request import SeoSummaryPageRequest from .seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType from .seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel from .seo_summary_page_status_response import SeoSummaryPageStatusResponse from .serp_search_location import SerpSearchLocation from .serp_search_type import SerpSearchType from .smart_gpt_page_output import SmartGptPageOutput +from .smart_gpt_page_request import SmartGptPageRequest +from .smart_gpt_page_request_functions_item import SmartGptPageRequestFunctionsItem +from .smart_gpt_page_request_functions_item_trigger import SmartGptPageRequestFunctionsItemTrigger +from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType +from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel from .smart_gpt_page_status_response import SmartGptPageStatusResponse from .social_lookup_email_page_output import SocialLookupEmailPageOutput +from .social_lookup_email_page_request import SocialLookupEmailPageRequest +from .social_lookup_email_page_request_functions_item import SocialLookupEmailPageRequestFunctionsItem +from .social_lookup_email_page_request_functions_item_trigger import SocialLookupEmailPageRequestFunctionsItemTrigger from .social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType from .social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel from .social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse from .stream_error import StreamError from .text2audio_page_output import Text2AudioPageOutput +from 
.text2audio_page_request import Text2AudioPageRequest +from .text2audio_page_request_functions_item import Text2AudioPageRequestFunctionsItem +from .text2audio_page_request_functions_item_trigger import Text2AudioPageRequestFunctionsItemTrigger from .text2audio_page_status_response import Text2AudioPageStatusResponse from .text_to_speech_page_output import TextToSpeechPageOutput +from .text_to_speech_page_request import TextToSpeechPageRequest +from .text_to_speech_page_request_functions_item import TextToSpeechPageRequestFunctionsItem +from .text_to_speech_page_request_functions_item_trigger import TextToSpeechPageRequestFunctionsItemTrigger from .text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel from .text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName from .text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider from .text_to_speech_page_status_response import TextToSpeechPageStatusResponse from .training_data_model import TrainingDataModel from .translation_page_output import TranslationPageOutput +from .translation_page_request import TranslationPageRequest +from .translation_page_request_functions_item import TranslationPageRequestFunctionsItem +from .translation_page_request_functions_item_trigger import TranslationPageRequestFunctionsItemTrigger from .translation_page_request_selected_model import TranslationPageRequestSelectedModel from .translation_page_status_response import TranslationPageStatusResponse from .validation_error import ValidationError @@ -190,6 +297,29 @@ from .video_bots_page_output import VideoBotsPageOutput from .video_bots_page_output_final_keyword_query import VideoBotsPageOutputFinalKeywordQuery from .video_bots_page_output_final_prompt import VideoBotsPageOutputFinalPrompt +from .video_bots_page_request import VideoBotsPageRequest +from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel +from 
.video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle +from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel +from .video_bots_page_request_functions_item import VideoBotsPageRequestFunctionsItem +from .video_bots_page_request_functions_item_trigger import VideoBotsPageRequestFunctionsItemTrigger +from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel +from .video_bots_page_request_messages_item import VideoBotsPageRequestMessagesItem +from .video_bots_page_request_messages_item_content import VideoBotsPageRequestMessagesItemContent +from .video_bots_page_request_messages_item_content_item import ( + VideoBotsPageRequestMessagesItemContentItem, + VideoBotsPageRequestMessagesItemContentItem_ImageUrl, + VideoBotsPageRequestMessagesItemContentItem_Text, +) +from .video_bots_page_request_messages_item_role import VideoBotsPageRequestMessagesItemRole +from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel +from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName +from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType +from .video_bots_page_request_sadtalker_settings import VideoBotsPageRequestSadtalkerSettings +from .video_bots_page_request_sadtalker_settings_preprocess import VideoBotsPageRequestSadtalkerSettingsPreprocess +from .video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel +from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel +from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider from .video_bots_page_status_response import VideoBotsPageStatusResponse __all__ = [ @@ -202,6 +332,9 @@ "AsrOutputJson", "AsrPageOutput", "AsrPageOutputOutputTextItem", + "AsrPageRequest", + "AsrPageRequestFunctionsItem", + "AsrPageRequestFunctionsItemTrigger", "AsrPageRequestOutputFormat", 
"AsrPageRequestSelectedModel", "AsrPageRequestTranslationModel", @@ -210,8 +343,19 @@ "BalanceResponse", "BotBroadcastFilters", "BulkEvalPageOutput", + "BulkEvalPageRequest", + "BulkEvalPageRequestAggFunctionsItem", + "BulkEvalPageRequestAggFunctionsItemFunction", + "BulkEvalPageRequestEvalPromptsItem", + "BulkEvalPageRequestFunctionsItem", + "BulkEvalPageRequestFunctionsItemTrigger", + "BulkEvalPageRequestResponseFormatType", + "BulkEvalPageRequestSelectedModel", "BulkEvalPageStatusResponse", "BulkRunnerPageOutput", + "BulkRunnerPageRequest", + "BulkRunnerPageRequestFunctionsItem", + "BulkRunnerPageRequestFunctionsItemTrigger", "BulkRunnerPageStatusResponse", "ButtonPressed", "CalledFunctionResponse", @@ -220,16 +364,27 @@ "ChatCompletionContentPartTextParam", "ChyronPlantPageOutput", "ChyronPlantPageRequest", + "ChyronPlantPageRequestFunctionsItem", + "ChyronPlantPageRequestFunctionsItemTrigger", "ChyronPlantPageStatusResponse", "CompareLlmPageOutput", + "CompareLlmPageRequest", + "CompareLlmPageRequestFunctionsItem", + "CompareLlmPageRequestFunctionsItemTrigger", "CompareLlmPageRequestResponseFormatType", "CompareLlmPageRequestSelectedModelsItem", "CompareLlmPageStatusResponse", "CompareText2ImgPageOutput", + "CompareText2ImgPageRequest", + "CompareText2ImgPageRequestFunctionsItem", + "CompareText2ImgPageRequestFunctionsItemTrigger", "CompareText2ImgPageRequestScheduler", "CompareText2ImgPageRequestSelectedModelsItem", "CompareText2ImgPageStatusResponse", "CompareUpscalerPageOutput", + "CompareUpscalerPageRequest", + "CompareUpscalerPageRequestFunctionsItem", + "CompareUpscalerPageRequestFunctionsItemTrigger", "CompareUpscalerPageRequestSelectedModelsItem", "CompareUpscalerPageStatusResponse", "ConsoleLogs", @@ -243,79 +398,135 @@ "ConversationStart", "CreateStreamResponse", "DeforumSdPageOutput", + "DeforumSdPageRequest", + "DeforumSdPageRequestAnimationPromptsItem", + "DeforumSdPageRequestFunctionsItem", + "DeforumSdPageRequestFunctionsItemTrigger", 
"DeforumSdPageRequestSelectedModel", "DeforumSdPageStatusResponse", "DocExtractPageOutput", + "DocExtractPageRequest", + "DocExtractPageRequestFunctionsItem", + "DocExtractPageRequestFunctionsItemTrigger", "DocExtractPageRequestResponseFormatType", "DocExtractPageRequestSelectedAsrModel", "DocExtractPageRequestSelectedModel", "DocExtractPageStatusResponse", "DocSearchPageOutput", + "DocSearchPageRequest", "DocSearchPageRequestCitationStyle", "DocSearchPageRequestEmbeddingModel", + "DocSearchPageRequestFunctionsItem", + "DocSearchPageRequestFunctionsItemTrigger", "DocSearchPageRequestKeywordQuery", "DocSearchPageRequestResponseFormatType", "DocSearchPageRequestSelectedModel", "DocSearchPageStatusResponse", "DocSummaryPageOutput", + "DocSummaryPageRequest", + "DocSummaryPageRequestFunctionsItem", + "DocSummaryPageRequestFunctionsItemTrigger", "DocSummaryPageRequestResponseFormatType", "DocSummaryPageRequestSelectedAsrModel", "DocSummaryPageRequestSelectedModel", "DocSummaryPageStatusResponse", "EmailFaceInpaintingPageOutput", + "EmailFaceInpaintingPageRequest", + "EmailFaceInpaintingPageRequestFunctionsItem", + "EmailFaceInpaintingPageRequestFunctionsItemTrigger", "EmailFaceInpaintingPageRequestSelectedModel", "EmailFaceInpaintingPageStatusResponse", "EmbeddingsPageOutput", + "EmbeddingsPageRequest", + "EmbeddingsPageRequestFunctionsItem", + "EmbeddingsPageRequestFunctionsItemTrigger", "EmbeddingsPageRequestSelectedModel", "EmbeddingsPageStatusResponse", "EvalPrompt", "FaceInpaintingPageOutput", + "FaceInpaintingPageRequest", + "FaceInpaintingPageRequestFunctionsItem", + "FaceInpaintingPageRequestFunctionsItemTrigger", "FaceInpaintingPageRequestSelectedModel", "FaceInpaintingPageStatusResponse", "FinalResponse", "FunctionsPageOutput", + "FunctionsPageRequest", "FunctionsPageStatusResponse", "GenericErrorResponse", "GenericErrorResponseDetail", "GoogleGptPageOutput", + "GoogleGptPageRequest", "GoogleGptPageRequestEmbeddingModel", + "GoogleGptPageRequestFunctionsItem", 
+ "GoogleGptPageRequestFunctionsItemTrigger", "GoogleGptPageRequestResponseFormatType", "GoogleGptPageRequestSelectedModel", "GoogleGptPageStatusResponse", "GoogleImageGenPageOutput", + "GoogleImageGenPageRequest", + "GoogleImageGenPageRequestFunctionsItem", + "GoogleImageGenPageRequestFunctionsItemTrigger", "GoogleImageGenPageRequestSelectedModel", "GoogleImageGenPageStatusResponse", "HttpValidationError", "ImageSegmentationPageOutput", + "ImageSegmentationPageRequest", + "ImageSegmentationPageRequestFunctionsItem", + "ImageSegmentationPageRequestFunctionsItemTrigger", "ImageSegmentationPageRequestSelectedModel", "ImageSegmentationPageStatusResponse", "ImageUrl", "ImageUrlDetail", "Img2ImgPageOutput", + "Img2ImgPageRequest", + "Img2ImgPageRequestFunctionsItem", + "Img2ImgPageRequestFunctionsItemTrigger", "Img2ImgPageRequestSelectedControlnetModel", "Img2ImgPageRequestSelectedControlnetModelItem", "Img2ImgPageRequestSelectedModel", "Img2ImgPageStatusResponse", "LetterWriterPageOutput", "LetterWriterPageRequest", + "LetterWriterPageRequestExampleLettersItem", + "LetterWriterPageRequestFunctionsItem", + "LetterWriterPageRequestFunctionsItemTrigger", "LetterWriterPageStatusResponse", "LipsyncPageOutput", + "LipsyncPageRequest", + "LipsyncPageRequestFunctionsItem", + "LipsyncPageRequestFunctionsItemTrigger", + "LipsyncPageRequestSadtalkerSettings", + "LipsyncPageRequestSadtalkerSettingsPreprocess", + "LipsyncPageRequestSelectedModel", "LipsyncPageStatusResponse", "LipsyncTtsPageOutput", + "LipsyncTtsPageRequest", + "LipsyncTtsPageRequestFunctionsItem", + "LipsyncTtsPageRequestFunctionsItemTrigger", "LipsyncTtsPageRequestOpenaiTtsModel", "LipsyncTtsPageRequestOpenaiVoiceName", + "LipsyncTtsPageRequestSadtalkerSettings", + "LipsyncTtsPageRequestSadtalkerSettingsPreprocess", "LipsyncTtsPageRequestSelectedModel", "LipsyncTtsPageRequestTtsProvider", "LipsyncTtsPageStatusResponse", "LlmTools", "MessagePart", "ObjectInpaintingPageOutput", + "ObjectInpaintingPageRequest", + 
"ObjectInpaintingPageRequestFunctionsItem", + "ObjectInpaintingPageRequestFunctionsItemTrigger", "ObjectInpaintingPageRequestSelectedModel", "ObjectInpaintingPageStatusResponse", "PromptTreeNode", "PromptTreeNodePrompt", "QrCodeGeneratorPageOutput", + "QrCodeGeneratorPageRequest", + "QrCodeGeneratorPageRequestFunctionsItem", + "QrCodeGeneratorPageRequestFunctionsItemTrigger", "QrCodeGeneratorPageRequestImagePromptControlnetModelsItem", + "QrCodeGeneratorPageRequestQrCodeVcard", "QrCodeGeneratorPageRequestScheduler", "QrCodeGeneratorPageRequestSelectedControlnetModelItem", "QrCodeGeneratorPageRequestSelectedModel", @@ -326,14 +537,20 @@ "RelatedDocSearchResponse", "RelatedGoogleGptResponse", "RelatedQnADocPageOutput", + "RelatedQnADocPageRequest", "RelatedQnADocPageRequestCitationStyle", "RelatedQnADocPageRequestEmbeddingModel", + "RelatedQnADocPageRequestFunctionsItem", + "RelatedQnADocPageRequestFunctionsItemTrigger", "RelatedQnADocPageRequestKeywordQuery", "RelatedQnADocPageRequestResponseFormatType", "RelatedQnADocPageRequestSelectedModel", "RelatedQnADocPageStatusResponse", "RelatedQnAPageOutput", + "RelatedQnAPageRequest", "RelatedQnAPageRequestEmbeddingModel", + "RelatedQnAPageRequestFunctionsItem", + "RelatedQnAPageRequestFunctionsItemTrigger", "RelatedQnAPageRequestResponseFormatType", "RelatedQnAPageRequestSelectedModel", "RelatedQnAPageStatusResponse", @@ -348,27 +565,45 @@ "SadTalkerSettingsPreprocess", "SearchReference", "SeoSummaryPageOutput", + "SeoSummaryPageRequest", "SeoSummaryPageRequestResponseFormatType", "SeoSummaryPageRequestSelectedModel", "SeoSummaryPageStatusResponse", "SerpSearchLocation", "SerpSearchType", "SmartGptPageOutput", + "SmartGptPageRequest", + "SmartGptPageRequestFunctionsItem", + "SmartGptPageRequestFunctionsItemTrigger", + "SmartGptPageRequestResponseFormatType", + "SmartGptPageRequestSelectedModel", "SmartGptPageStatusResponse", "SocialLookupEmailPageOutput", + "SocialLookupEmailPageRequest", + 
"SocialLookupEmailPageRequestFunctionsItem", + "SocialLookupEmailPageRequestFunctionsItemTrigger", "SocialLookupEmailPageRequestResponseFormatType", "SocialLookupEmailPageRequestSelectedModel", "SocialLookupEmailPageStatusResponse", "StreamError", "Text2AudioPageOutput", + "Text2AudioPageRequest", + "Text2AudioPageRequestFunctionsItem", + "Text2AudioPageRequestFunctionsItemTrigger", "Text2AudioPageStatusResponse", "TextToSpeechPageOutput", + "TextToSpeechPageRequest", + "TextToSpeechPageRequestFunctionsItem", + "TextToSpeechPageRequestFunctionsItemTrigger", "TextToSpeechPageRequestOpenaiTtsModel", "TextToSpeechPageRequestOpenaiVoiceName", "TextToSpeechPageRequestTtsProvider", "TextToSpeechPageStatusResponse", "TrainingDataModel", "TranslationPageOutput", + "TranslationPageRequest", + "TranslationPageRequestFunctionsItem", + "TranslationPageRequestFunctionsItemTrigger", "TranslationPageRequestSelectedModel", "TranslationPageStatusResponse", "ValidationError", @@ -377,5 +612,26 @@ "VideoBotsPageOutput", "VideoBotsPageOutputFinalKeywordQuery", "VideoBotsPageOutputFinalPrompt", + "VideoBotsPageRequest", + "VideoBotsPageRequestAsrModel", + "VideoBotsPageRequestCitationStyle", + "VideoBotsPageRequestEmbeddingModel", + "VideoBotsPageRequestFunctionsItem", + "VideoBotsPageRequestFunctionsItemTrigger", + "VideoBotsPageRequestLipsyncModel", + "VideoBotsPageRequestMessagesItem", + "VideoBotsPageRequestMessagesItemContent", + "VideoBotsPageRequestMessagesItemContentItem", + "VideoBotsPageRequestMessagesItemContentItem_ImageUrl", + "VideoBotsPageRequestMessagesItemContentItem_Text", + "VideoBotsPageRequestMessagesItemRole", + "VideoBotsPageRequestOpenaiTtsModel", + "VideoBotsPageRequestOpenaiVoiceName", + "VideoBotsPageRequestResponseFormatType", + "VideoBotsPageRequestSadtalkerSettings", + "VideoBotsPageRequestSadtalkerSettingsPreprocess", + "VideoBotsPageRequestSelectedModel", + "VideoBotsPageRequestTranslationModel", + "VideoBotsPageRequestTtsProvider", 
"VideoBotsPageStatusResponse", ] diff --git a/src/gooey/types/asr_page_request.py b/src/gooey/types/asr_page_request.py new file mode 100644 index 0000000..6cd22a5 --- /dev/null +++ b/src/gooey/types/asr_page_request.py @@ -0,0 +1,43 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .asr_page_request_functions_item import AsrPageRequestFunctionsItem +import pydantic +from .asr_page_request_selected_model import AsrPageRequestSelectedModel +from .asr_page_request_translation_model import AsrPageRequestTranslationModel +from .asr_page_request_output_format import AsrPageRequestOutputFormat +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class AsrPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[AsrPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + documents: typing.List[str] + selected_model: typing.Optional[AsrPageRequestSelectedModel] = None + language: typing.Optional[str] = None + translation_model: typing.Optional[AsrPageRequestTranslationModel] = None + output_format: typing.Optional[AsrPageRequestOutputFormat] = None + google_translate_target: typing.Optional[str] = pydantic.Field(default=None) + """ + use `translation_model` & `translation_target` instead. 
+ """ + + translation_source: typing.Optional[str] = None + translation_target: typing.Optional[str] = None + glossary_document: typing.Optional[str] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/asr_page_request_functions_item.py b/src/gooey/types/asr_page_request_functions_item.py new file mode 100644 index 0000000..4b3e69d --- /dev/null +++ b/src/gooey/types/asr_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .asr_page_request_functions_item_trigger import AsrPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class AsrPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: AsrPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/asr_page_request_functions_item_trigger.py b/src/gooey/types/asr_page_request_functions_item_trigger.py new file mode 100644 index 0000000..adf037c --- /dev/null +++ b/src/gooey/types/asr_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +AsrPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/bulk_eval_page_request.py b/src/gooey/types/bulk_eval_page_request.py new file mode 100644 index 0000000..1c2e417 --- /dev/null +++ b/src/gooey/types/bulk_eval_page_request.py @@ -0,0 +1,56 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .bulk_eval_page_request_functions_item import BulkEvalPageRequestFunctionsItem +import pydantic +from .bulk_eval_page_request_eval_prompts_item import BulkEvalPageRequestEvalPromptsItem +from .bulk_eval_page_request_agg_functions_item import BulkEvalPageRequestAggFunctionsItem +from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel +from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class BulkEvalPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[BulkEvalPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + documents: typing.List[str] = pydantic.Field() + """ + Upload or link to a CSV or google sheet that contains your sample input data. + For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. + Remember to includes header names in your CSV too. + """ + + eval_prompts: typing.Optional[typing.List[BulkEvalPageRequestEvalPromptsItem]] = pydantic.Field(default=None) + """ + Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. 
+ _The `columns` dictionary can be used to reference the spreadsheet columns._ + """ + + agg_functions: typing.Optional[typing.List[BulkEvalPageRequestAggFunctionsItem]] = pydantic.Field(default=None) + """ + Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). + """ + + selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = None + avoid_repetition: typing.Optional[bool] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[float] = None + max_tokens: typing.Optional[int] = None + sampling_temperature: typing.Optional[float] = None + response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/bulk_eval_page_request_agg_functions_item.py b/src/gooey/types/bulk_eval_page_request_agg_functions_item.py new file mode 100644 index 0000000..b7f5cd7 --- /dev/null +++ b/src/gooey/types/bulk_eval_page_request_agg_functions_item.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .bulk_eval_page_request_agg_functions_item_function import BulkEvalPageRequestAggFunctionsItemFunction +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class BulkEvalPageRequestAggFunctionsItem(UniversalBaseModel): + column: typing.Optional[str] = None + function: BulkEvalPageRequestAggFunctionsItemFunction + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/bulk_eval_page_request_agg_functions_item_function.py b/src/gooey/types/bulk_eval_page_request_agg_functions_item_function.py new file mode 100644 index 0000000..cecef30 --- /dev/null +++ b/src/gooey/types/bulk_eval_page_request_agg_functions_item_function.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +BulkEvalPageRequestAggFunctionsItemFunction = typing.Union[ + typing.Literal[ + "mean", + "median", + "min", + "max", + "sum", + "cumsum", + "prod", + "cumprod", + "std", + "var", + "first", + "last", + "count", + "cumcount", + "nunique", + "rank", + ], + typing.Any, +] diff --git a/src/gooey/types/bulk_eval_page_request_eval_prompts_item.py b/src/gooey/types/bulk_eval_page_request_eval_prompts_item.py new file mode 100644 index 0000000..7d3956d --- /dev/null +++ b/src/gooey/types/bulk_eval_page_request_eval_prompts_item.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class BulkEvalPageRequestEvalPromptsItem(UniversalBaseModel): + name: str + prompt: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/bulk_eval_page_request_functions_item.py b/src/gooey/types/bulk_eval_page_request_functions_item.py new file mode 100644 index 0000000..b89037c --- /dev/null +++ b/src/gooey/types/bulk_eval_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .bulk_eval_page_request_functions_item_trigger import BulkEvalPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class BulkEvalPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: BulkEvalPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/bulk_eval_page_request_functions_item_trigger.py b/src/gooey/types/bulk_eval_page_request_functions_item_trigger.py new file mode 100644 index 0000000..f2726cc --- /dev/null +++ b/src/gooey/types/bulk_eval_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +BulkEvalPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/evaluator/types/bulk_eval_page_request_response_format_type.py b/src/gooey/types/bulk_eval_page_request_response_format_type.py similarity index 100% rename from src/gooey/evaluator/types/bulk_eval_page_request_response_format_type.py rename to src/gooey/types/bulk_eval_page_request_response_format_type.py diff --git a/src/gooey/evaluator/types/bulk_eval_page_request_selected_model.py b/src/gooey/types/bulk_eval_page_request_selected_model.py similarity index 100% rename from src/gooey/evaluator/types/bulk_eval_page_request_selected_model.py rename to src/gooey/types/bulk_eval_page_request_selected_model.py diff --git a/src/gooey/types/bulk_runner_page_request.py b/src/gooey/types/bulk_runner_page_request.py new file mode 100644 index 0000000..78f996c --- /dev/null +++ b/src/gooey/types/bulk_runner_page_request.py @@ -0,0 +1,55 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .bulk_runner_page_request_functions_item import BulkRunnerPageRequestFunctionsItem +import pydantic +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class BulkRunnerPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[BulkRunnerPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + documents: typing.List[str] = pydantic.Field() + """ + Upload or link to a CSV or google sheet that contains your sample input data. + For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. + Remember to includes header names in your CSV too. 
+ """ + + run_urls: typing.List[str] = pydantic.Field() + """ + Provide one or more Gooey.AI workflow runs. + You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. + """ + + input_columns: typing.Dict[str, str] = pydantic.Field() + """ + For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. + """ + + output_columns: typing.Dict[str, str] = pydantic.Field() + """ + For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. + """ + + eval_urls: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. + """ + + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/bulk_runner_page_request_functions_item.py b/src/gooey/types/bulk_runner_page_request_functions_item.py new file mode 100644 index 0000000..a0a1fd5 --- /dev/null +++ b/src/gooey/types/bulk_runner_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .bulk_runner_page_request_functions_item_trigger import BulkRunnerPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class BulkRunnerPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: BulkRunnerPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/bulk_runner_page_request_functions_item_trigger.py b/src/gooey/types/bulk_runner_page_request_functions_item_trigger.py new file mode 100644 index 0000000..e63024b --- /dev/null +++ b/src/gooey/types/bulk_runner_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +BulkRunnerPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/chyron_plant_page_request.py b/src/gooey/types/chyron_plant_page_request.py index e0733de..c03c35a 100644 --- a/src/gooey/types/chyron_plant_page_request.py +++ b/src/gooey/types/chyron_plant_page_request.py @@ -2,14 +2,14 @@ from ..core.pydantic_utilities import UniversalBaseModel import typing -from .recipe_function import RecipeFunction +from .chyron_plant_page_request_functions_item import ChyronPlantPageRequestFunctionsItem import pydantic from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 class ChyronPlantPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None + functions: typing.Optional[typing.List[ChyronPlantPageRequestFunctionsItem]] = None variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) """ Variables to be used as Jinja prompt templates and in functions as arguments diff --git a/src/gooey/types/chyron_plant_page_request_functions_item.py b/src/gooey/types/chyron_plant_page_request_functions_item.py new file mode 100644 index 0000000..9e42d2c --- /dev/null +++ b/src/gooey/types/chyron_plant_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated 
by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .chyron_plant_page_request_functions_item_trigger import ChyronPlantPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class ChyronPlantPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: ChyronPlantPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/chyron_plant_page_request_functions_item_trigger.py b/src/gooey/types/chyron_plant_page_request_functions_item_trigger.py new file mode 100644 index 0000000..a260d69 --- /dev/null +++ b/src/gooey/types/chyron_plant_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ChyronPlantPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/compare_llm_page_request.py b/src/gooey/types/compare_llm_page_request.py new file mode 100644 index 0000000..4665729 --- /dev/null +++ b/src/gooey/types/compare_llm_page_request.py @@ -0,0 +1,37 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .compare_llm_page_request_functions_item import CompareLlmPageRequestFunctionsItem +import pydantic +from .compare_llm_page_request_selected_models_item import CompareLlmPageRequestSelectedModelsItem +from .compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class CompareLlmPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[CompareLlmPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + input_prompt: typing.Optional[str] = None + selected_models: typing.Optional[typing.List[CompareLlmPageRequestSelectedModelsItem]] = None + avoid_repetition: typing.Optional[bool] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[float] = None + max_tokens: typing.Optional[int] = None + sampling_temperature: typing.Optional[float] = None + response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/compare_llm_page_request_functions_item.py b/src/gooey/types/compare_llm_page_request_functions_item.py new file mode 100644 index 0000000..c30e9e4 --- /dev/null +++ b/src/gooey/types/compare_llm_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .compare_llm_page_request_functions_item_trigger import CompareLlmPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class CompareLlmPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: CompareLlmPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/compare_llm_page_request_functions_item_trigger.py b/src/gooey/types/compare_llm_page_request_functions_item_trigger.py new file mode 100644 index 0000000..6c3fce3 --- /dev/null +++ b/src/gooey/types/compare_llm_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CompareLlmPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/compare_text2img_page_request.py b/src/gooey/types/compare_text2img_page_request.py new file mode 100644 index 0000000..7c7a17c --- /dev/null +++ b/src/gooey/types/compare_text2img_page_request.py @@ -0,0 +1,44 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .compare_text2img_page_request_functions_item import CompareText2ImgPageRequestFunctionsItem +import pydantic +from .compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem +from .compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class CompareText2ImgPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[CompareText2ImgPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + text_prompt: str + negative_prompt: typing.Optional[str] = None + output_width: typing.Optional[int] = None + output_height: typing.Optional[int] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[int] = None + dall_e3quality: typing.Optional[str] = pydantic.Field(alias="dall_e_3_quality", default=None) + dall_e3style: typing.Optional[str] = pydantic.Field(alias="dall_e_3_style", default=None) + guidance_scale: typing.Optional[float] = None + seed: typing.Optional[int] = None + sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None) + selected_models: typing.Optional[typing.List[CompareText2ImgPageRequestSelectedModelsItem]] = None + scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = None + edit_instruction: typing.Optional[str] = None + image_guidance_scale: typing.Optional[float] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = 
pydantic.Extra.allow diff --git a/src/gooey/types/compare_text2img_page_request_functions_item.py b/src/gooey/types/compare_text2img_page_request_functions_item.py new file mode 100644 index 0000000..ce23010 --- /dev/null +++ b/src/gooey/types/compare_text2img_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .compare_text2img_page_request_functions_item_trigger import CompareText2ImgPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class CompareText2ImgPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: CompareText2ImgPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/compare_text2img_page_request_functions_item_trigger.py b/src/gooey/types/compare_text2img_page_request_functions_item_trigger.py new file mode 100644 index 0000000..31a6df3 --- /dev/null +++ b/src/gooey/types/compare_text2img_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CompareText2ImgPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/compare_upscaler_page_request.py b/src/gooey/types/compare_upscaler_page_request.py new file mode 100644 index 0000000..4c6c073 --- /dev/null +++ b/src/gooey/types/compare_upscaler_page_request.py @@ -0,0 +1,37 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .compare_upscaler_page_request_functions_item import CompareUpscalerPageRequestFunctionsItem +import pydantic +from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class CompareUpscalerPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[CompareUpscalerPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + input_image: typing.Optional[str] = None + input_video: typing.Optional[str] = None + scale: int = pydantic.Field() + """ + The final upsampling scale of the image + """ + + selected_models: typing.Optional[typing.List[CompareUpscalerPageRequestSelectedModelsItem]] = None + selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/compare_upscaler_page_request_functions_item.py b/src/gooey/types/compare_upscaler_page_request_functions_item.py new file mode 100644 index 0000000..555f3be --- /dev/null +++ b/src/gooey/types/compare_upscaler_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .compare_upscaler_page_request_functions_item_trigger import CompareUpscalerPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class CompareUpscalerPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: CompareUpscalerPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/compare_upscaler_page_request_functions_item_trigger.py b/src/gooey/types/compare_upscaler_page_request_functions_item_trigger.py new file mode 100644 index 0000000..6d20867 --- /dev/null +++ b/src/gooey/types/compare_upscaler_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CompareUpscalerPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/deforum_sd_page_request.py b/src/gooey/types/deforum_sd_page_request.py new file mode 100644 index 0000000..0daeb38 --- /dev/null +++ b/src/gooey/types/deforum_sd_page_request.py @@ -0,0 +1,41 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .deforum_sd_page_request_functions_item import DeforumSdPageRequestFunctionsItem +import pydantic +from .deforum_sd_page_request_animation_prompts_item import DeforumSdPageRequestAnimationPromptsItem +from .deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class DeforumSdPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[DeforumSdPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + animation_prompts: typing.List[DeforumSdPageRequestAnimationPromptsItem] + max_frames: typing.Optional[int] = None + selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = None + animation_mode: typing.Optional[str] = None + zoom: typing.Optional[str] = None + translation_x: typing.Optional[str] = None + translation_y: typing.Optional[str] = None + rotation3d_x: typing.Optional[str] = pydantic.Field(alias="rotation_3d_x", default=None) + rotation3d_y: typing.Optional[str] = pydantic.Field(alias="rotation_3d_y", default=None) + rotation3d_z: typing.Optional[str] = pydantic.Field(alias="rotation_3d_z", default=None) + fps: typing.Optional[int] = None + seed: typing.Optional[int] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/deforum_sd_page_request_animation_prompts_item.py b/src/gooey/types/deforum_sd_page_request_animation_prompts_item.py new file mode 100644 index 0000000..fedd42c --- 
/dev/null +++ b/src/gooey/types/deforum_sd_page_request_animation_prompts_item.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class DeforumSdPageRequestAnimationPromptsItem(UniversalBaseModel): + frame: str + prompt: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/deforum_sd_page_request_functions_item.py b/src/gooey/types/deforum_sd_page_request_functions_item.py new file mode 100644 index 0000000..d8b171d --- /dev/null +++ b/src/gooey/types/deforum_sd_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .deforum_sd_page_request_functions_item_trigger import DeforumSdPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class DeforumSdPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: DeforumSdPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/deforum_sd_page_request_functions_item_trigger.py b/src/gooey/types/deforum_sd_page_request_functions_item_trigger.py new file mode 100644 index 0000000..8dce880 --- /dev/null +++ b/src/gooey/types/deforum_sd_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DeforumSdPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/doc_extract_page_request.py b/src/gooey/types/doc_extract_page_request.py new file mode 100644 index 0000000..4a9b0a4 --- /dev/null +++ b/src/gooey/types/doc_extract_page_request.py @@ -0,0 +1,43 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .doc_extract_page_request_functions_item import DocExtractPageRequestFunctionsItem +import pydantic +from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel +from .doc_extract_page_request_selected_model import DocExtractPageRequestSelectedModel +from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class DocExtractPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[DocExtractPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + documents: typing.List[str] + sheet_url: typing.Optional[str] = None + selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = None + google_translate_target: typing.Optional[str] = None + glossary_document: typing.Optional[str] = None + task_instructions: typing.Optional[str] = None + selected_model: typing.Optional[DocExtractPageRequestSelectedModel] = None + avoid_repetition: typing.Optional[bool] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[float] = None + max_tokens: typing.Optional[int] = None + sampling_temperature: typing.Optional[float] = None + response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/doc_extract_page_request_functions_item.py 
b/src/gooey/types/doc_extract_page_request_functions_item.py new file mode 100644 index 0000000..178e55c --- /dev/null +++ b/src/gooey/types/doc_extract_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .doc_extract_page_request_functions_item_trigger import DocExtractPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class DocExtractPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: DocExtractPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/doc_extract_page_request_functions_item_trigger.py b/src/gooey/types/doc_extract_page_request_functions_item_trigger.py new file mode 100644 index 0000000..f80f6ac --- /dev/null +++ b/src/gooey/types/doc_extract_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DocExtractPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/doc_search_page_request.py b/src/gooey/types/doc_search_page_request.py new file mode 100644 index 0000000..18ba1e9 --- /dev/null +++ b/src/gooey/types/doc_search_page_request.py @@ -0,0 +1,56 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .doc_search_page_request_functions_item import DocSearchPageRequestFunctionsItem +import pydantic +from .doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery +from .doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel +from .doc_search_page_request_selected_model import DocSearchPageRequestSelectedModel +from .doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle +from .doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class DocSearchPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[DocSearchPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + search_query: str + keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = None + documents: typing.Optional[typing.List[str]] = None + max_references: typing.Optional[int] = None + max_context_words: typing.Optional[int] = None + scroll_jump: typing.Optional[int] = None + doc_extract_url: typing.Optional[str] = None + embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = None + dense_weight: typing.Optional[float] = pydantic.Field(default=None) + """ + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
+ """ + + task_instructions: typing.Optional[str] = None + query_instructions: typing.Optional[str] = None + selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = None + citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = None + avoid_repetition: typing.Optional[bool] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[float] = None + max_tokens: typing.Optional[int] = None + sampling_temperature: typing.Optional[float] = None + response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/doc_search_page_request_functions_item.py b/src/gooey/types/doc_search_page_request_functions_item.py new file mode 100644 index 0000000..5589d3c --- /dev/null +++ b/src/gooey/types/doc_search_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .doc_search_page_request_functions_item_trigger import DocSearchPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class DocSearchPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: DocSearchPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/doc_search_page_request_functions_item_trigger.py b/src/gooey/types/doc_search_page_request_functions_item_trigger.py new file mode 100644 index 0000000..81cfbc8 --- /dev/null +++ b/src/gooey/types/doc_search_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DocSearchPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/doc_summary_page_request.py b/src/gooey/types/doc_summary_page_request.py new file mode 100644 index 0000000..cc657a9 --- /dev/null +++ b/src/gooey/types/doc_summary_page_request.py @@ -0,0 +1,43 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .doc_summary_page_request_functions_item import DocSummaryPageRequestFunctionsItem +import pydantic +from .doc_summary_page_request_selected_model import DocSummaryPageRequestSelectedModel +from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel +from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class DocSummaryPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[DocSummaryPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + documents: typing.List[str] + task_instructions: typing.Optional[str] = None + merge_instructions: typing.Optional[str] = None + selected_model: typing.Optional[DocSummaryPageRequestSelectedModel] = None + chain_type: typing.Optional[typing.Literal["map_reduce"]] = None + selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = None + google_translate_target: typing.Optional[str] = None + avoid_repetition: typing.Optional[bool] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[float] = None + max_tokens: typing.Optional[int] = None + sampling_temperature: typing.Optional[float] = None + response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/doc_summary_page_request_functions_item.py 
b/src/gooey/types/doc_summary_page_request_functions_item.py new file mode 100644 index 0000000..164dde2 --- /dev/null +++ b/src/gooey/types/doc_summary_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .doc_summary_page_request_functions_item_trigger import DocSummaryPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class DocSummaryPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: DocSummaryPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/doc_summary_page_request_functions_item_trigger.py b/src/gooey/types/doc_summary_page_request_functions_item_trigger.py new file mode 100644 index 0000000..66b8074 --- /dev/null +++ b/src/gooey/types/doc_summary_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DocSummaryPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/email_face_inpainting_page_request.py b/src/gooey/types/email_face_inpainting_page_request.py new file mode 100644 index 0000000..d28b82c --- /dev/null +++ b/src/gooey/types/email_face_inpainting_page_request.py @@ -0,0 +1,51 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .email_face_inpainting_page_request_functions_item import EmailFaceInpaintingPageRequestFunctionsItem +import pydantic +from .email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class EmailFaceInpaintingPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[EmailFaceInpaintingPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + email_address: typing.Optional[str] = None + twitter_handle: typing.Optional[str] = None + text_prompt: str + face_scale: typing.Optional[float] = None + face_pos_x: typing.Optional[float] = None + face_pos_y: typing.Optional[float] = None + selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = None + negative_prompt: typing.Optional[str] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[int] = None + upscale_factor: typing.Optional[float] = None + output_width: typing.Optional[int] = None + output_height: typing.Optional[int] = None + guidance_scale: typing.Optional[float] = None + should_send_email: typing.Optional[bool] = None + email_from: typing.Optional[str] = None + email_cc: typing.Optional[str] = None + email_bcc: typing.Optional[str] = None + email_subject: typing.Optional[str] = None + email_body: typing.Optional[str] = None + email_body_enable_html: typing.Optional[bool] = None + fallback_email_body: typing.Optional[str] = None + seed: typing.Optional[int] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # 
Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/email_face_inpainting_page_request_functions_item.py b/src/gooey/types/email_face_inpainting_page_request_functions_item.py new file mode 100644 index 0000000..cee7dbd --- /dev/null +++ b/src/gooey/types/email_face_inpainting_page_request_functions_item.py @@ -0,0 +1,26 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .email_face_inpainting_page_request_functions_item_trigger import ( + EmailFaceInpaintingPageRequestFunctionsItemTrigger, +) +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class EmailFaceInpaintingPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: EmailFaceInpaintingPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/email_face_inpainting_page_request_functions_item_trigger.py b/src/gooey/types/email_face_inpainting_page_request_functions_item_trigger.py new file mode 100644 index 0000000..5ed78eb --- /dev/null +++ b/src/gooey/types/email_face_inpainting_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +EmailFaceInpaintingPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/embeddings_page_request.py b/src/gooey/types/embeddings_page_request.py new file mode 100644 index 0000000..10e63ff --- /dev/null +++ b/src/gooey/types/embeddings_page_request.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .embeddings_page_request_functions_item import EmbeddingsPageRequestFunctionsItem +import pydantic +from .embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class EmbeddingsPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[EmbeddingsPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + texts: typing.List[str] + selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/embeddings_page_request_functions_item.py b/src/gooey/types/embeddings_page_request_functions_item.py new file mode 100644 index 0000000..3a58b4a --- /dev/null +++ b/src/gooey/types/embeddings_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .embeddings_page_request_functions_item_trigger import EmbeddingsPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class EmbeddingsPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: EmbeddingsPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/embeddings_page_request_functions_item_trigger.py b/src/gooey/types/embeddings_page_request_functions_item_trigger.py new file mode 100644 index 0000000..1e03d0b --- /dev/null +++ b/src/gooey/types/embeddings_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +EmbeddingsPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/face_inpainting_page_request.py b/src/gooey/types/face_inpainting_page_request.py new file mode 100644 index 0000000..79b7b1b --- /dev/null +++ b/src/gooey/types/face_inpainting_page_request.py @@ -0,0 +1,42 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .face_inpainting_page_request_functions_item import FaceInpaintingPageRequestFunctionsItem +import pydantic +from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class FaceInpaintingPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[FaceInpaintingPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + input_image: str + text_prompt: str + face_scale: typing.Optional[float] = None + face_pos_x: typing.Optional[float] = None + face_pos_y: typing.Optional[float] = None + selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = None + negative_prompt: typing.Optional[str] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[int] = None + upscale_factor: typing.Optional[float] = None + output_width: typing.Optional[int] = None + output_height: typing.Optional[int] = None + guidance_scale: typing.Optional[float] = None + seed: typing.Optional[int] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/face_inpainting_page_request_functions_item.py b/src/gooey/types/face_inpainting_page_request_functions_item.py new file mode 100644 index 0000000..56e47b0 --- /dev/null +++ b/src/gooey/types/face_inpainting_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .face_inpainting_page_request_functions_item_trigger import FaceInpaintingPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class FaceInpaintingPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: FaceInpaintingPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/face_inpainting_page_request_functions_item_trigger.py b/src/gooey/types/face_inpainting_page_request_functions_item_trigger.py new file mode 100644 index 0000000..87301c5 --- /dev/null +++ b/src/gooey/types/face_inpainting_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +FaceInpaintingPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/functions_page_request.py b/src/gooey/types/functions_page_request.py new file mode 100644 index 0000000..f0e15f4 --- /dev/null +++ b/src/gooey/types/functions_page_request.py @@ -0,0 +1,30 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +import pydantic +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class FunctionsPageRequest(UniversalBaseModel): + code: typing.Optional[str] = pydantic.Field(default=None) + """ + The JS code to be executed. 
+ """ + + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used in the code + """ + + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/google_gpt_page_request.py b/src/gooey/types/google_gpt_page_request.py new file mode 100644 index 0000000..a2dfbae --- /dev/null +++ b/src/gooey/types/google_gpt_page_request.py @@ -0,0 +1,66 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .google_gpt_page_request_functions_item import GoogleGptPageRequestFunctionsItem +import pydantic +from .google_gpt_page_request_selected_model import GoogleGptPageRequestSelectedModel +from .google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel +from .google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType +from .serp_search_location import SerpSearchLocation +from .serp_search_type import SerpSearchType +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class GoogleGptPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[GoogleGptPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + search_query: str + site_filter: str + task_instructions: typing.Optional[str] = None + query_instructions: typing.Optional[str] = None + selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = None + max_search_urls: typing.Optional[int] = None + 
max_references: typing.Optional[int] = None + max_context_words: typing.Optional[int] = None + scroll_jump: typing.Optional[int] = None + embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = None + dense_weight: typing.Optional[float] = pydantic.Field(default=None) + """ + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + """ + + avoid_repetition: typing.Optional[bool] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[float] = None + max_tokens: typing.Optional[int] = None + sampling_temperature: typing.Optional[float] = None + response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = None + serp_search_location: typing.Optional[SerpSearchLocation] = None + scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + DEPRECATED: use `serp_search_location` instead + """ + + serp_search_type: typing.Optional[SerpSearchType] = None + scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None) + """ + DEPRECATED: use `serp_search_type` instead + """ + + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/google_gpt_page_request_functions_item.py b/src/gooey/types/google_gpt_page_request_functions_item.py new file mode 100644 index 0000000..6339fa4 --- /dev/null +++ b/src/gooey/types/google_gpt_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .google_gpt_page_request_functions_item_trigger import GoogleGptPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class GoogleGptPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: GoogleGptPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/google_gpt_page_request_functions_item_trigger.py b/src/gooey/types/google_gpt_page_request_functions_item_trigger.py new file mode 100644 index 0000000..1012309 --- /dev/null +++ b/src/gooey/types/google_gpt_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +GoogleGptPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/google_image_gen_page_request.py b/src/gooey/types/google_image_gen_page_request.py new file mode 100644 index 0000000..d9164fa --- /dev/null +++ b/src/gooey/types/google_image_gen_page_request.py @@ -0,0 +1,46 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .google_image_gen_page_request_functions_item import GoogleImageGenPageRequestFunctionsItem +import pydantic +from .serp_search_location import SerpSearchLocation +from .google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class GoogleImageGenPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[GoogleImageGenPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + serp_search_location: typing.Optional[SerpSearchLocation] = None + scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + DEPRECATED: use `serp_search_location` instead + """ + + search_query: str + text_prompt: str + selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = None + negative_prompt: typing.Optional[str] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[int] = None + guidance_scale: typing.Optional[float] = None + prompt_strength: typing.Optional[float] = None + sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None) + seed: typing.Optional[int] = None + image_guidance_scale: typing.Optional[float] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/google_image_gen_page_request_functions_item.py b/src/gooey/types/google_image_gen_page_request_functions_item.py new file mode 100644 index 
0000000..b5d99e7 --- /dev/null +++ b/src/gooey/types/google_image_gen_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .google_image_gen_page_request_functions_item_trigger import GoogleImageGenPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class GoogleImageGenPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: GoogleImageGenPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/google_image_gen_page_request_functions_item_trigger.py b/src/gooey/types/google_image_gen_page_request_functions_item_trigger.py new file mode 100644 index 0000000..0637b1e --- /dev/null +++ b/src/gooey/types/google_image_gen_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +GoogleImageGenPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/image_segmentation_page_request.py b/src/gooey/types/image_segmentation_page_request.py new file mode 100644 index 0000000..ffc2606 --- /dev/null +++ b/src/gooey/types/image_segmentation_page_request.py @@ -0,0 +1,36 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .image_segmentation_page_request_functions_item import ImageSegmentationPageRequestFunctionsItem +import pydantic +from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class ImageSegmentationPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[ImageSegmentationPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + input_image: str + selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = None + mask_threshold: typing.Optional[float] = None + rect_persepective_transform: typing.Optional[bool] = None + reflection_opacity: typing.Optional[float] = None + obj_scale: typing.Optional[float] = None + obj_pos_x: typing.Optional[float] = None + obj_pos_y: typing.Optional[float] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/image_segmentation_page_request_functions_item.py b/src/gooey/types/image_segmentation_page_request_functions_item.py new file mode 100644 index 0000000..e8f0607 --- /dev/null +++ b/src/gooey/types/image_segmentation_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .image_segmentation_page_request_functions_item_trigger import ImageSegmentationPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class ImageSegmentationPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: ImageSegmentationPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/image_segmentation_page_request_functions_item_trigger.py b/src/gooey/types/image_segmentation_page_request_functions_item_trigger.py new file mode 100644 index 0000000..f27b327 --- /dev/null +++ b/src/gooey/types/image_segmentation_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ImageSegmentationPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/img2img_page_request.py b/src/gooey/types/img2img_page_request.py new file mode 100644 index 0000000..980aa47 --- /dev/null +++ b/src/gooey/types/img2img_page_request.py @@ -0,0 +1,43 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .img2img_page_request_functions_item import Img2ImgPageRequestFunctionsItem +import pydantic +from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel +from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class Img2ImgPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[Img2ImgPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + input_image: str + text_prompt: typing.Optional[str] = None + selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = None + selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = None + negative_prompt: typing.Optional[str] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[int] = None + output_width: typing.Optional[int] = None + output_height: typing.Optional[int] = None + guidance_scale: typing.Optional[float] = None + prompt_strength: typing.Optional[float] = None + controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None + seed: typing.Optional[int] = None + image_guidance_scale: typing.Optional[float] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/img2img_page_request_functions_item.py b/src/gooey/types/img2img_page_request_functions_item.py new file mode 100644 index 0000000..96bc64b --- /dev/null +++ 
b/src/gooey/types/img2img_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .img2img_page_request_functions_item_trigger import Img2ImgPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class Img2ImgPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: Img2ImgPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/img2img_page_request_functions_item_trigger.py b/src/gooey/types/img2img_page_request_functions_item_trigger.py new file mode 100644 index 0000000..be448b1 --- /dev/null +++ b/src/gooey/types/img2img_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +Img2ImgPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/letter_writer_page_request.py b/src/gooey/types/letter_writer_page_request.py index 5706083..8a7dab4 100644 --- a/src/gooey/types/letter_writer_page_request.py +++ b/src/gooey/types/letter_writer_page_request.py @@ -2,15 +2,15 @@ from ..core.pydantic_utilities import UniversalBaseModel import typing -from .recipe_function import RecipeFunction +from .letter_writer_page_request_functions_item import LetterWriterPageRequestFunctionsItem import pydantic -from .training_data_model import TrainingDataModel +from .letter_writer_page_request_example_letters_item import LetterWriterPageRequestExampleLettersItem from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 class LetterWriterPageRequest(UniversalBaseModel): - functions: typing.Optional[typing.List[RecipeFunction]] = None + functions: typing.Optional[typing.List[LetterWriterPageRequestFunctionsItem]] = None variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) """ Variables to be used as Jinja prompt templates and in functions as arguments @@ -18,7 +18,7 @@ class LetterWriterPageRequest(UniversalBaseModel): action_id: str prompt_header: typing.Optional[str] = None - example_letters: typing.Optional[typing.List[TrainingDataModel]] = None + example_letters: typing.Optional[typing.List[LetterWriterPageRequestExampleLettersItem]] = None lm_selected_api: typing.Optional[str] = None lm_selected_engine: typing.Optional[str] = None num_outputs: typing.Optional[int] = None diff --git a/src/gooey/types/letter_writer_page_request_example_letters_item.py b/src/gooey/types/letter_writer_page_request_example_letters_item.py new file mode 100644 index 0000000..20ecac1 --- /dev/null +++ b/src/gooey/types/letter_writer_page_request_example_letters_item.py @@ -0,0 +1,20 @@ +# This file was auto-generated 
by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing +import pydantic + + +class LetterWriterPageRequestExampleLettersItem(UniversalBaseModel): + prompt: str + completion: str + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/letter_writer_page_request_functions_item.py b/src/gooey/types/letter_writer_page_request_functions_item.py new file mode 100644 index 0000000..034066d --- /dev/null +++ b/src/gooey/types/letter_writer_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .letter_writer_page_request_functions_item_trigger import LetterWriterPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class LetterWriterPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: LetterWriterPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/letter_writer_page_request_functions_item_trigger.py b/src/gooey/types/letter_writer_page_request_functions_item_trigger.py new file mode 100644 index 0000000..49b7234 --- /dev/null +++ b/src/gooey/types/letter_writer_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +LetterWriterPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/lipsync_page_request.py b/src/gooey/types/lipsync_page_request.py new file mode 100644 index 0000000..a99de87 --- /dev/null +++ b/src/gooey/types/lipsync_page_request.py @@ -0,0 +1,37 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .lipsync_page_request_functions_item import LipsyncPageRequestFunctionsItem +import pydantic +from .lipsync_page_request_sadtalker_settings import LipsyncPageRequestSadtalkerSettings +from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class LipsyncPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[LipsyncPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + input_face: typing.Optional[str] = None + face_padding_top: typing.Optional[int] = None + face_padding_bottom: typing.Optional[int] = None + face_padding_left: typing.Optional[int] = None + face_padding_right: typing.Optional[int] = None + sadtalker_settings: typing.Optional[LipsyncPageRequestSadtalkerSettings] = None + selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = None + input_audio: typing.Optional[str] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/lipsync_page_request_functions_item.py b/src/gooey/types/lipsync_page_request_functions_item.py new file mode 100644 index 0000000..330b29c --- /dev/null +++ b/src/gooey/types/lipsync_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .lipsync_page_request_functions_item_trigger import LipsyncPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class LipsyncPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: LipsyncPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/lipsync_page_request_functions_item_trigger.py b/src/gooey/types/lipsync_page_request_functions_item_trigger.py new file mode 100644 index 0000000..4b4ff8d --- /dev/null +++ b/src/gooey/types/lipsync_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +LipsyncPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/lipsync_page_request_sadtalker_settings.py b/src/gooey/types/lipsync_page_request_sadtalker_settings.py new file mode 100644 index 0000000..99f993c --- /dev/null +++ b/src/gooey/types/lipsync_page_request_sadtalker_settings.py @@ -0,0 +1,40 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .lipsync_page_request_sadtalker_settings_preprocess import LipsyncPageRequestSadtalkerSettingsPreprocess +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class LipsyncPageRequestSadtalkerSettings(UniversalBaseModel): + still: typing.Optional[bool] = None + preprocess: typing.Optional[LipsyncPageRequestSadtalkerSettingsPreprocess] = pydantic.Field(default=None) + """ + SadTalker only generates 512x512 output. 'crop' handles this by cropping the input to 512x512. 'resize' scales down the input to fit 512x512 and scales it back up after lipsyncing (does not work well for full person images, better for portraits). 'full' processes the cropped region and pastes it back into the original input. 'extcrop' and 'extfull' are similar to 'crop' and 'full' but with extended cropping. + """ + + pose_style: typing.Optional[int] = pydantic.Field(default=None) + """ + Random seed 0-45 inclusive that affects how the pose is animated. + """ + + expression_scale: typing.Optional[float] = pydantic.Field(default=None) + """ + Scale the amount of expression motion. 1.0 is normal, 0.5 is very reduced, and 2.0 is quite a lot. 
+ """ + + ref_eyeblink: typing.Optional[str] = None + ref_pose: typing.Optional[str] = None + input_yaw: typing.Optional[typing.List[int]] = None + input_pitch: typing.Optional[typing.List[int]] = None + input_roll: typing.Optional[typing.List[int]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/lipsync_page_request_sadtalker_settings_preprocess.py b/src/gooey/types/lipsync_page_request_sadtalker_settings_preprocess.py new file mode 100644 index 0000000..32c814a --- /dev/null +++ b/src/gooey/types/lipsync_page_request_sadtalker_settings_preprocess.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +LipsyncPageRequestSadtalkerSettingsPreprocess = typing.Union[ + typing.Literal["crop", "extcrop", "resize", "full", "extfull"], typing.Any +] diff --git a/src/gooey/lip_syncing/types/lipsync_page_request_selected_model.py b/src/gooey/types/lipsync_page_request_selected_model.py similarity index 100% rename from src/gooey/lip_syncing/types/lipsync_page_request_selected_model.py rename to src/gooey/types/lipsync_page_request_selected_model.py diff --git a/src/gooey/types/lipsync_tts_page_request.py b/src/gooey/types/lipsync_tts_page_request.py new file mode 100644 index 0000000..97d1379 --- /dev/null +++ b/src/gooey/types/lipsync_tts_page_request.py @@ -0,0 +1,62 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .lipsync_tts_page_request_functions_item import LipsyncTtsPageRequestFunctionsItem +import pydantic +from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider +from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName +from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel +from .lipsync_tts_page_request_sadtalker_settings import LipsyncTtsPageRequestSadtalkerSettings +from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class LipsyncTtsPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[LipsyncTtsPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + text_prompt: str + tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = None + uberduck_voice_name: typing.Optional[str] = None + uberduck_speaking_rate: typing.Optional[float] = None + google_voice_name: typing.Optional[str] = None + google_speaking_rate: typing.Optional[float] = None + google_pitch: typing.Optional[float] = None + bark_history_prompt: typing.Optional[str] = None + elevenlabs_voice_name: typing.Optional[str] = pydantic.Field(default=None) + """ + Use `elevenlabs_voice_id` instead + """ + + elevenlabs_api_key: typing.Optional[str] = None + elevenlabs_voice_id: typing.Optional[str] = None + elevenlabs_model: typing.Optional[str] = None + elevenlabs_stability: typing.Optional[float] = None + elevenlabs_similarity_boost: typing.Optional[float] = None + elevenlabs_style: typing.Optional[float] = None + elevenlabs_speaker_boost: typing.Optional[bool] = None + azure_voice_name: 
typing.Optional[str] = None + openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = None + openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = None + input_face: typing.Optional[str] = None + face_padding_top: typing.Optional[int] = None + face_padding_bottom: typing.Optional[int] = None + face_padding_left: typing.Optional[int] = None + face_padding_right: typing.Optional[int] = None + sadtalker_settings: typing.Optional[LipsyncTtsPageRequestSadtalkerSettings] = None + selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/lipsync_tts_page_request_functions_item.py b/src/gooey/types/lipsync_tts_page_request_functions_item.py new file mode 100644 index 0000000..7557dd6 --- /dev/null +++ b/src/gooey/types/lipsync_tts_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .lipsync_tts_page_request_functions_item_trigger import LipsyncTtsPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class LipsyncTtsPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: LipsyncTtsPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/lipsync_tts_page_request_functions_item_trigger.py b/src/gooey/types/lipsync_tts_page_request_functions_item_trigger.py new file mode 100644 index 0000000..0502725 --- /dev/null +++ b/src/gooey/types/lipsync_tts_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +LipsyncTtsPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/lipsync_tts_page_request_sadtalker_settings.py b/src/gooey/types/lipsync_tts_page_request_sadtalker_settings.py new file mode 100644 index 0000000..eaeb372 --- /dev/null +++ b/src/gooey/types/lipsync_tts_page_request_sadtalker_settings.py @@ -0,0 +1,40 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .lipsync_tts_page_request_sadtalker_settings_preprocess import LipsyncTtsPageRequestSadtalkerSettingsPreprocess +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class LipsyncTtsPageRequestSadtalkerSettings(UniversalBaseModel): + still: typing.Optional[bool] = None + preprocess: typing.Optional[LipsyncTtsPageRequestSadtalkerSettingsPreprocess] = pydantic.Field(default=None) + """ + SadTalker only generates 512x512 output. 'crop' handles this by cropping the input to 512x512. 'resize' scales down the input to fit 512x512 and scales it back up after lipsyncing (does not work well for full person images, better for portraits). 'full' processes the cropped region and pastes it back into the original input. 'extcrop' and 'extfull' are similar to 'crop' and 'full' but with extended cropping. 
+ """ + + pose_style: typing.Optional[int] = pydantic.Field(default=None) + """ + Random seed 0-45 inclusive that affects how the pose is animated. + """ + + expression_scale: typing.Optional[float] = pydantic.Field(default=None) + """ + Scale the amount of expression motion. 1.0 is normal, 0.5 is very reduced, and 2.0 is quite a lot. + """ + + ref_eyeblink: typing.Optional[str] = None + ref_pose: typing.Optional[str] = None + input_yaw: typing.Optional[typing.List[int]] = None + input_pitch: typing.Optional[typing.List[int]] = None + input_roll: typing.Optional[typing.List[int]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/lipsync_tts_page_request_sadtalker_settings_preprocess.py b/src/gooey/types/lipsync_tts_page_request_sadtalker_settings_preprocess.py new file mode 100644 index 0000000..02b229c --- /dev/null +++ b/src/gooey/types/lipsync_tts_page_request_sadtalker_settings_preprocess.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +LipsyncTtsPageRequestSadtalkerSettingsPreprocess = typing.Union[ + typing.Literal["crop", "extcrop", "resize", "full", "extfull"], typing.Any +] diff --git a/src/gooey/types/object_inpainting_page_request.py b/src/gooey/types/object_inpainting_page_request.py new file mode 100644 index 0000000..3757017 --- /dev/null +++ b/src/gooey/types/object_inpainting_page_request.py @@ -0,0 +1,43 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .object_inpainting_page_request_functions_item import ObjectInpaintingPageRequestFunctionsItem +import pydantic +from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class ObjectInpaintingPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[ObjectInpaintingPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + input_image: str + text_prompt: str + obj_scale: typing.Optional[float] = None + obj_pos_x: typing.Optional[float] = None + obj_pos_y: typing.Optional[float] = None + mask_threshold: typing.Optional[float] = None + selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = None + negative_prompt: typing.Optional[str] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[int] = None + output_width: typing.Optional[int] = None + output_height: typing.Optional[int] = None + guidance_scale: typing.Optional[float] = None + sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None) + seed: typing.Optional[int] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/object_inpainting_page_request_functions_item.py b/src/gooey/types/object_inpainting_page_request_functions_item.py new file mode 100644 index 0000000..17d230e --- /dev/null +++ 
b/src/gooey/types/object_inpainting_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .object_inpainting_page_request_functions_item_trigger import ObjectInpaintingPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class ObjectInpaintingPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: ObjectInpaintingPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/object_inpainting_page_request_functions_item_trigger.py b/src/gooey/types/object_inpainting_page_request_functions_item_trigger.py new file mode 100644 index 0000000..6412226 --- /dev/null +++ b/src/gooey/types/object_inpainting_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ObjectInpaintingPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/qr_code_generator_page_request.py b/src/gooey/types/qr_code_generator_page_request.py new file mode 100644 index 0000000..f411ce1 --- /dev/null +++ b/src/gooey/types/qr_code_generator_page_request.py @@ -0,0 +1,66 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .qr_code_generator_page_request_functions_item import QrCodeGeneratorPageRequestFunctionsItem +import pydantic +from .qr_code_generator_page_request_qr_code_vcard import QrCodeGeneratorPageRequestQrCodeVcard +from .qr_code_generator_page_request_image_prompt_controlnet_models_item import ( + QrCodeGeneratorPageRequestImagePromptControlnetModelsItem, +) +from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel +from .qr_code_generator_page_request_selected_controlnet_model_item import ( + QrCodeGeneratorPageRequestSelectedControlnetModelItem, +) +from .qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class QrCodeGeneratorPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[QrCodeGeneratorPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + qr_code_data: typing.Optional[str] = None + qr_code_input_image: typing.Optional[str] = None + qr_code_vcard: typing.Optional[QrCodeGeneratorPageRequestQrCodeVcard] = None + qr_code_file: typing.Optional[str] = None + use_url_shortener: typing.Optional[bool] = None + text_prompt: str + negative_prompt: typing.Optional[str] = None + image_prompt: typing.Optional[str] = None + image_prompt_controlnet_models: typing.Optional[ + typing.List[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] + ] = None + image_prompt_strength: typing.Optional[float] = None + image_prompt_scale: typing.Optional[float] = None + image_prompt_pos_x: typing.Optional[float] = None + image_prompt_pos_y: typing.Optional[float] = None + selected_model: 
typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = None + selected_controlnet_model: typing.Optional[typing.List[QrCodeGeneratorPageRequestSelectedControlnetModelItem]] = ( + None + ) + output_width: typing.Optional[int] = None + output_height: typing.Optional[int] = None + guidance_scale: typing.Optional[float] = None + controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[int] = None + scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = None + seed: typing.Optional[int] = None + obj_scale: typing.Optional[float] = None + obj_pos_x: typing.Optional[float] = None + obj_pos_y: typing.Optional[float] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/qr_code_generator_page_request_functions_item.py b/src/gooey/types/qr_code_generator_page_request_functions_item.py new file mode 100644 index 0000000..bfa8fc1 --- /dev/null +++ b/src/gooey/types/qr_code_generator_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .qr_code_generator_page_request_functions_item_trigger import QrCodeGeneratorPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class QrCodeGeneratorPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: QrCodeGeneratorPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/qr_code_generator_page_request_functions_item_trigger.py b/src/gooey/types/qr_code_generator_page_request_functions_item_trigger.py new file mode 100644 index 0000000..af392bc --- /dev/null +++ b/src/gooey/types/qr_code_generator_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +QrCodeGeneratorPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/qr_code_generator_page_request_qr_code_vcard.py b/src/gooey/types/qr_code_generator_page_request_qr_code_vcard.py new file mode 100644 index 0000000..956b8c1 --- /dev/null +++ b/src/gooey/types/qr_code_generator_page_request_qr_code_vcard.py @@ -0,0 +1,44 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class QrCodeGeneratorPageRequestQrCodeVcard(UniversalBaseModel): + format_name: str + email: typing.Optional[str] = None + gender: typing.Optional[str] = None + birthday_year: typing.Optional[int] = None + birthday_month: typing.Optional[int] = None + birthday_day: typing.Optional[int] = None + family_name: typing.Optional[str] = None + given_name: typing.Optional[str] = None + middle_names: typing.Optional[str] = None + honorific_prefixes: typing.Optional[str] = None + honorific_suffixes: typing.Optional[str] = None + impp: typing.Optional[str] = None + address: typing.Optional[str] = None + calendar_url: typing.Optional[str] = None + comma_separated_categories: typing.Optional[str] = None + kind: typing.Optional[str] = None + language: typing.Optional[str] = None + organization: typing.Optional[str] = None + photo_url: typing.Optional[str] = None + logo_url: typing.Optional[str] = None + role: typing.Optional[str] = None + timezone: typing.Optional[str] = None + job_title: typing.Optional[str] = None + urls: typing.Optional[typing.List[str]] = None + tel: typing.Optional[str] = None + note: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/recipe_function.py b/src/gooey/types/recipe_function.py index ed79772..08bea99 100644 --- a/src/gooey/types/recipe_function.py +++ b/src/gooey/types/recipe_function.py @@ -1,18 +1,14 @@ # This file was auto-generated by Fern from our API Definition. 
from ..core.pydantic_utilities import UniversalBaseModel -import pydantic from .recipe_function_trigger import RecipeFunctionTrigger +import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing class RecipeFunction(UniversalBaseModel): - url: str = pydantic.Field() - """ - The URL of the [function](https://gooey.ai/functions) to call. - """ - + url: str trigger: RecipeFunctionTrigger = pydantic.Field() """ When to run this function. `pre` runs before the recipe, `post` runs after the recipe. diff --git a/src/gooey/types/related_qn_a_doc_page_request.py b/src/gooey/types/related_qn_a_doc_page_request.py new file mode 100644 index 0000000..d39235f --- /dev/null +++ b/src/gooey/types/related_qn_a_doc_page_request.py @@ -0,0 +1,70 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .related_qn_a_doc_page_request_functions_item import RelatedQnADocPageRequestFunctionsItem +import pydantic +from .related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery +from .related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel +from .related_qn_a_doc_page_request_selected_model import RelatedQnADocPageRequestSelectedModel +from .related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle +from .related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType +from .serp_search_location import SerpSearchLocation +from .serp_search_type import SerpSearchType +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class RelatedQnADocPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[RelatedQnADocPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja 
prompt templates and in functions as arguments + """ + + search_query: str + keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = None + documents: typing.Optional[typing.List[str]] = None + max_references: typing.Optional[int] = None + max_context_words: typing.Optional[int] = None + scroll_jump: typing.Optional[int] = None + doc_extract_url: typing.Optional[str] = None + embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = None + dense_weight: typing.Optional[float] = pydantic.Field(default=None) + """ + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + """ + + task_instructions: typing.Optional[str] = None + query_instructions: typing.Optional[str] = None + selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = None + citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = None + avoid_repetition: typing.Optional[bool] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[float] = None + max_tokens: typing.Optional[int] = None + sampling_temperature: typing.Optional[float] = None + response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = None + serp_search_location: typing.Optional[SerpSearchLocation] = None + scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + DEPRECATED: use `serp_search_location` instead + """ + + serp_search_type: typing.Optional[SerpSearchType] = None + scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None) + """ + DEPRECATED: use `serp_search_type` instead + """ + + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + 
+ class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/related_qn_a_doc_page_request_functions_item.py b/src/gooey/types/related_qn_a_doc_page_request_functions_item.py new file mode 100644 index 0000000..5fdda28 --- /dev/null +++ b/src/gooey/types/related_qn_a_doc_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .related_qn_a_doc_page_request_functions_item_trigger import RelatedQnADocPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class RelatedQnADocPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: RelatedQnADocPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/related_qn_a_doc_page_request_functions_item_trigger.py b/src/gooey/types/related_qn_a_doc_page_request_functions_item_trigger.py new file mode 100644 index 0000000..efa683c --- /dev/null +++ b/src/gooey/types/related_qn_a_doc_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +RelatedQnADocPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/related_qn_a_page_request.py b/src/gooey/types/related_qn_a_page_request.py new file mode 100644 index 0000000..26836dc --- /dev/null +++ b/src/gooey/types/related_qn_a_page_request.py @@ -0,0 +1,66 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .related_qn_a_page_request_functions_item import RelatedQnAPageRequestFunctionsItem +import pydantic +from .related_qn_a_page_request_selected_model import RelatedQnAPageRequestSelectedModel +from .related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel +from .related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType +from .serp_search_location import SerpSearchLocation +from .serp_search_type import SerpSearchType +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class RelatedQnAPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[RelatedQnAPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + search_query: str + site_filter: str + task_instructions: typing.Optional[str] = None + query_instructions: typing.Optional[str] = None + selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = None + max_search_urls: typing.Optional[int] = None + max_references: typing.Optional[int] = None + max_context_words: typing.Optional[int] = None + scroll_jump: typing.Optional[int] = None + embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = None + dense_weight: typing.Optional[float] = pydantic.Field(default=None) + """ + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
+ """ + + avoid_repetition: typing.Optional[bool] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[float] = None + max_tokens: typing.Optional[int] = None + sampling_temperature: typing.Optional[float] = None + response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = None + serp_search_location: typing.Optional[SerpSearchLocation] = None + scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + DEPRECATED: use `serp_search_location` instead + """ + + serp_search_type: typing.Optional[SerpSearchType] = None + scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None) + """ + DEPRECATED: use `serp_search_type` instead + """ + + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/related_qn_a_page_request_functions_item.py b/src/gooey/types/related_qn_a_page_request_functions_item.py new file mode 100644 index 0000000..0300307 --- /dev/null +++ b/src/gooey/types/related_qn_a_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .related_qn_a_page_request_functions_item_trigger import RelatedQnAPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class RelatedQnAPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: RelatedQnAPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. 
+ """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/related_qn_a_page_request_functions_item_trigger.py b/src/gooey/types/related_qn_a_page_request_functions_item_trigger.py new file mode 100644 index 0000000..4276d4a --- /dev/null +++ b/src/gooey/types/related_qn_a_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +RelatedQnAPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/sad_talker_settings.py b/src/gooey/types/sad_talker_settings.py index 85464e7..c9200b4 100644 --- a/src/gooey/types/sad_talker_settings.py +++ b/src/gooey/types/sad_talker_settings.py @@ -24,16 +24,8 @@ class SadTalkerSettings(UniversalBaseModel): Scale the amount of expression motion. 1.0 is normal, 0.5 is very reduced, and 2.0 is quite a lot. """ - ref_eyeblink: typing.Optional[str] = pydantic.Field(default=None) - """ - Optional reference video for eyeblinks to make the eyebrow movement more natural. - """ - - ref_pose: typing.Optional[str] = pydantic.Field(default=None) - """ - Optional reference video to pose the head. - """ - + ref_eyeblink: typing.Optional[str] = None + ref_pose: typing.Optional[str] = None input_yaw: typing.Optional[typing.List[int]] = None input_pitch: typing.Optional[typing.List[int]] = None input_roll: typing.Optional[typing.List[int]] = None diff --git a/src/gooey/types/seo_summary_page_request.py b/src/gooey/types/seo_summary_page_request.py new file mode 100644 index 0000000..4515d26 --- /dev/null +++ b/src/gooey/types/seo_summary_page_request.py @@ -0,0 +1,52 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .seo_summary_page_request_selected_model import SeoSummaryPageRequestSelectedModel +from .seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType +from .serp_search_location import SerpSearchLocation +import pydantic +from .serp_search_type import SerpSearchType +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class SeoSummaryPageRequest(UniversalBaseModel): + search_query: str + keywords: str + title: str + company_url: str + task_instructions: typing.Optional[str] = None + enable_html: typing.Optional[bool] = None + selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = None + max_search_urls: typing.Optional[int] = None + enable_crosslinks: typing.Optional[bool] = None + seed: typing.Optional[int] = None + avoid_repetition: typing.Optional[bool] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[float] = None + max_tokens: typing.Optional[int] = None + sampling_temperature: typing.Optional[float] = None + response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = None + serp_search_location: typing.Optional[SerpSearchLocation] = None + scaleserp_locations: typing.Optional[typing.List[str]] = pydantic.Field(default=None) + """ + DEPRECATED: use `serp_search_location` instead + """ + + serp_search_type: typing.Optional[SerpSearchType] = None + scaleserp_search_field: typing.Optional[str] = pydantic.Field(default=None) + """ + DEPRECATED: use `serp_search_type` instead + """ + + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/smart_gpt_page_request.py 
b/src/gooey/types/smart_gpt_page_request.py new file mode 100644 index 0000000..d2353ff --- /dev/null +++ b/src/gooey/types/smart_gpt_page_request.py @@ -0,0 +1,40 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .smart_gpt_page_request_functions_item import SmartGptPageRequestFunctionsItem +import pydantic +from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel +from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class SmartGptPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[SmartGptPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + input_prompt: str + cot_prompt: typing.Optional[str] = None + reflexion_prompt: typing.Optional[str] = None + dera_prompt: typing.Optional[str] = None + selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = None + avoid_repetition: typing.Optional[bool] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[float] = None + max_tokens: typing.Optional[int] = None + sampling_temperature: typing.Optional[float] = None + response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/smart_gpt_page_request_functions_item.py b/src/gooey/types/smart_gpt_page_request_functions_item.py new file 
mode 100644 index 0000000..edb2c83 --- /dev/null +++ b/src/gooey/types/smart_gpt_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .smart_gpt_page_request_functions_item_trigger import SmartGptPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class SmartGptPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: SmartGptPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/smart_gpt_page_request_functions_item_trigger.py b/src/gooey/types/smart_gpt_page_request_functions_item_trigger.py new file mode 100644 index 0000000..09deb0a --- /dev/null +++ b/src/gooey/types/smart_gpt_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +SmartGptPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/smart_gpt/types/smart_gpt_page_request_response_format_type.py b/src/gooey/types/smart_gpt_page_request_response_format_type.py similarity index 100% rename from src/gooey/smart_gpt/types/smart_gpt_page_request_response_format_type.py rename to src/gooey/types/smart_gpt_page_request_response_format_type.py diff --git a/src/gooey/smart_gpt/types/smart_gpt_page_request_selected_model.py b/src/gooey/types/smart_gpt_page_request_selected_model.py similarity index 100% rename from src/gooey/smart_gpt/types/smart_gpt_page_request_selected_model.py rename to src/gooey/types/smart_gpt_page_request_selected_model.py diff --git a/src/gooey/types/social_lookup_email_page_request.py b/src/gooey/types/social_lookup_email_page_request.py new file mode 100644 index 0000000..a09b47e --- /dev/null +++ b/src/gooey/types/social_lookup_email_page_request.py @@ -0,0 +1,38 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .social_lookup_email_page_request_functions_item import SocialLookupEmailPageRequestFunctionsItem +import pydantic +from .social_lookup_email_page_request_selected_model import SocialLookupEmailPageRequestSelectedModel +from .social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class SocialLookupEmailPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[SocialLookupEmailPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + email_address: str + input_prompt: typing.Optional[str] = None + selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = None + avoid_repetition: typing.Optional[bool] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[float] = None + max_tokens: typing.Optional[int] = None + sampling_temperature: typing.Optional[float] = None + response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/social_lookup_email_page_request_functions_item.py b/src/gooey/types/social_lookup_email_page_request_functions_item.py new file mode 100644 index 0000000..cdd44bd --- /dev/null +++ b/src/gooey/types/social_lookup_email_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .social_lookup_email_page_request_functions_item_trigger import SocialLookupEmailPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class SocialLookupEmailPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: SocialLookupEmailPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/social_lookup_email_page_request_functions_item_trigger.py b/src/gooey/types/social_lookup_email_page_request_functions_item_trigger.py new file mode 100644 index 0000000..2dc85c6 --- /dev/null +++ b/src/gooey/types/social_lookup_email_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SocialLookupEmailPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/text2audio_page_request.py b/src/gooey/types/text2audio_page_request.py new file mode 100644 index 0000000..5594488 --- /dev/null +++ b/src/gooey/types/text2audio_page_request.py @@ -0,0 +1,36 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .text2audio_page_request_functions_item import Text2AudioPageRequestFunctionsItem +import pydantic +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class Text2AudioPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[Text2AudioPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + text_prompt: str + negative_prompt: typing.Optional[str] = None + duration_sec: typing.Optional[float] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[int] = None + guidance_scale: typing.Optional[float] = None + seed: typing.Optional[int] = None + sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None) + selected_models: typing.Optional[typing.List[typing.Literal["audio_ldm"]]] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/text2audio_page_request_functions_item.py b/src/gooey/types/text2audio_page_request_functions_item.py new file mode 100644 index 0000000..a8cacb6 --- /dev/null +++ b/src/gooey/types/text2audio_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +from .text2audio_page_request_functions_item_trigger import Text2AudioPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class Text2AudioPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: Text2AudioPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/text2audio_page_request_functions_item_trigger.py b/src/gooey/types/text2audio_page_request_functions_item_trigger.py new file mode 100644 index 0000000..921a95a --- /dev/null +++ b/src/gooey/types/text2audio_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +Text2AudioPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/text_to_speech_page_request.py b/src/gooey/types/text_to_speech_page_request.py new file mode 100644 index 0000000..fa527a5 --- /dev/null +++ b/src/gooey/types/text_to_speech_page_request.py @@ -0,0 +1,53 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .text_to_speech_page_request_functions_item import TextToSpeechPageRequestFunctionsItem +import pydantic +from .text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider +from .text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName +from .text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class TextToSpeechPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[TextToSpeechPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + text_prompt: str + tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = None + uberduck_voice_name: typing.Optional[str] = None + uberduck_speaking_rate: typing.Optional[float] = None + google_voice_name: typing.Optional[str] = None + google_speaking_rate: typing.Optional[float] = None + google_pitch: typing.Optional[float] = None + bark_history_prompt: typing.Optional[str] = None + elevenlabs_voice_name: typing.Optional[str] = pydantic.Field(default=None) + """ + Use `elevenlabs_voice_id` instead + """ + + elevenlabs_api_key: typing.Optional[str] = None + elevenlabs_voice_id: typing.Optional[str] = None + elevenlabs_model: typing.Optional[str] = None + elevenlabs_stability: typing.Optional[float] = None + elevenlabs_similarity_boost: typing.Optional[float] = None + elevenlabs_style: typing.Optional[float] = None + elevenlabs_speaker_boost: typing.Optional[bool] = None + azure_voice_name: typing.Optional[str] = None + openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = None + openai_tts_model: 
typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/text_to_speech_page_request_functions_item.py b/src/gooey/types/text_to_speech_page_request_functions_item.py new file mode 100644 index 0000000..4c4effd --- /dev/null +++ b/src/gooey/types/text_to_speech_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .text_to_speech_page_request_functions_item_trigger import TextToSpeechPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class TextToSpeechPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: TextToSpeechPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/text_to_speech_page_request_functions_item_trigger.py b/src/gooey/types/text_to_speech_page_request_functions_item_trigger.py new file mode 100644 index 0000000..0712d26 --- /dev/null +++ b/src/gooey/types/text_to_speech_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +TextToSpeechPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/translation_page_request.py b/src/gooey/types/translation_page_request.py new file mode 100644 index 0000000..a1931cd --- /dev/null +++ b/src/gooey/types/translation_page_request.py @@ -0,0 +1,33 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .translation_page_request_functions_item import TranslationPageRequestFunctionsItem +import pydantic +from .translation_page_request_selected_model import TranslationPageRequestSelectedModel +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class TranslationPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[TranslationPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + texts: typing.Optional[typing.List[str]] = None + selected_model: typing.Optional[TranslationPageRequestSelectedModel] = None + translation_source: typing.Optional[str] = None + translation_target: typing.Optional[str] = None + glossary_document: typing.Optional[str] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/translation_page_request_functions_item.py b/src/gooey/types/translation_page_request_functions_item.py new file mode 100644 index 0000000..522e42b --- /dev/null +++ b/src/gooey/types/translation_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated 
by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .translation_page_request_functions_item_trigger import TranslationPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class TranslationPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: TranslationPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/translation_page_request_functions_item_trigger.py b/src/gooey/types/translation_page_request_functions_item_trigger.py new file mode 100644 index 0000000..980bb69 --- /dev/null +++ b/src/gooey/types/translation_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +TranslationPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/video_bots_page_request.py b/src/gooey/types/video_bots_page_request.py new file mode 100644 index 0000000..5b69081 --- /dev/null +++ b/src/gooey/types/video_bots_page_request.py @@ -0,0 +1,130 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .video_bots_page_request_functions_item import VideoBotsPageRequestFunctionsItem +import pydantic +from .video_bots_page_request_messages_item import VideoBotsPageRequestMessagesItem +from .video_bots_page_request_selected_model import VideoBotsPageRequestSelectedModel +from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel +from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle +from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel +from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel +from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel +from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType +from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider +from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName +from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel +from .video_bots_page_request_sadtalker_settings import VideoBotsPageRequestSadtalkerSettings +from .run_settings import RunSettings +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class VideoBotsPageRequest(UniversalBaseModel): + functions: typing.Optional[typing.List[VideoBotsPageRequestFunctionsItem]] = None + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None) + """ + Variables to be used as Jinja prompt templates and in functions as arguments + """ + + input_prompt: typing.Optional[str] = None + input_audio: typing.Optional[str] = None + input_images: typing.Optional[typing.List[str]] = None + input_documents: typing.Optional[typing.List[str]] = None + doc_extract_url: typing.Optional[str] = pydantic.Field(default=None) + """ + Select a workflow to extract text from 
documents and images. + """ + + messages: typing.Optional[typing.List[VideoBotsPageRequestMessagesItem]] = None + bot_script: typing.Optional[str] = None + selected_model: typing.Optional[VideoBotsPageRequestSelectedModel] = None + document_model: typing.Optional[str] = pydantic.Field(default=None) + """ + When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) + """ + + task_instructions: typing.Optional[str] = None + query_instructions: typing.Optional[str] = None + keyword_instructions: typing.Optional[str] = None + documents: typing.Optional[typing.List[str]] = None + max_references: typing.Optional[int] = None + max_context_words: typing.Optional[int] = None + scroll_jump: typing.Optional[int] = None + embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = None + dense_weight: typing.Optional[float] = pydantic.Field(default=None) + """ + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + """ + + citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = None + use_url_shortener: typing.Optional[bool] = None + asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = pydantic.Field(default=None) + """ + Choose a model to transcribe incoming audio messages to text. + """ + + asr_language: typing.Optional[str] = pydantic.Field(default=None) + """ + Choose a language to transcribe incoming audio messages to text. 
+ """ + + translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = None + user_language: typing.Optional[str] = pydantic.Field(default=None) + """ + Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. + """ + + input_glossary_document: typing.Optional[str] = None + output_glossary_document: typing.Optional[str] = None + lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = None + tools: typing.Optional[typing.List[typing.Literal["json_to_pdf"]]] = pydantic.Field(default=None) + """ + Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). + """ + + avoid_repetition: typing.Optional[bool] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[float] = None + max_tokens: typing.Optional[int] = None + sampling_temperature: typing.Optional[float] = None + response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = None + tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = None + uberduck_voice_name: typing.Optional[str] = None + uberduck_speaking_rate: typing.Optional[float] = None + google_voice_name: typing.Optional[str] = None + google_speaking_rate: typing.Optional[float] = None + google_pitch: typing.Optional[float] = None + bark_history_prompt: typing.Optional[str] = None + elevenlabs_voice_name: typing.Optional[str] = pydantic.Field(default=None) + """ + Use `elevenlabs_voice_id` instead + """ + + elevenlabs_api_key: typing.Optional[str] = None + elevenlabs_voice_id: typing.Optional[str] = None + elevenlabs_model: typing.Optional[str] = None + elevenlabs_stability: typing.Optional[float] = None + elevenlabs_similarity_boost: typing.Optional[float] = None + elevenlabs_style: typing.Optional[float] = None + elevenlabs_speaker_boost: typing.Optional[bool] = None + azure_voice_name: 
typing.Optional[str] = None + openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = None + openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = None + input_face: typing.Optional[str] = None + face_padding_top: typing.Optional[int] = None + face_padding_bottom: typing.Optional[int] = None + face_padding_left: typing.Optional[int] = None + face_padding_right: typing.Optional[int] = None + sadtalker_settings: typing.Optional[VideoBotsPageRequestSadtalkerSettings] = None + settings: typing.Optional[RunSettings] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_asr_model.py b/src/gooey/types/video_bots_page_request_asr_model.py similarity index 100% rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_asr_model.py rename to src/gooey/types/video_bots_page_request_asr_model.py diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_citation_style.py b/src/gooey/types/video_bots_page_request_citation_style.py similarity index 100% rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_citation_style.py rename to src/gooey/types/video_bots_page_request_citation_style.py diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_embedding_model.py b/src/gooey/types/video_bots_page_request_embedding_model.py similarity index 100% rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_embedding_model.py rename to src/gooey/types/video_bots_page_request_embedding_model.py diff --git a/src/gooey/types/video_bots_page_request_functions_item.py b/src/gooey/types/video_bots_page_request_functions_item.py new file mode 100644 index 
0000000..5803c05 --- /dev/null +++ b/src/gooey/types/video_bots_page_request_functions_item.py @@ -0,0 +1,24 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .video_bots_page_request_functions_item_trigger import VideoBotsPageRequestFunctionsItemTrigger +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import typing + + +class VideoBotsPageRequestFunctionsItem(UniversalBaseModel): + url: str + trigger: VideoBotsPageRequestFunctionsItemTrigger = pydantic.Field() + """ + When to run this function. `pre` runs before the recipe, `post` runs after the recipe. + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/video_bots_page_request_functions_item_trigger.py b/src/gooey/types/video_bots_page_request_functions_item_trigger.py new file mode 100644 index 0000000..b3c2078 --- /dev/null +++ b/src/gooey/types/video_bots_page_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +VideoBotsPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_lipsync_model.py b/src/gooey/types/video_bots_page_request_lipsync_model.py similarity index 100% rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_lipsync_model.py rename to src/gooey/types/video_bots_page_request_lipsync_model.py diff --git a/src/gooey/types/video_bots_page_request_messages_item.py b/src/gooey/types/video_bots_page_request_messages_item.py new file mode 100644 index 0000000..004df6d --- /dev/null +++ b/src/gooey/types/video_bots_page_request_messages_item.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +from ..core.pydantic_utilities import UniversalBaseModel +from .video_bots_page_request_messages_item_role import VideoBotsPageRequestMessagesItemRole +from .video_bots_page_request_messages_item_content import VideoBotsPageRequestMessagesItemContent +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic + + +class VideoBotsPageRequestMessagesItem(UniversalBaseModel): + role: VideoBotsPageRequestMessagesItemRole + content: VideoBotsPageRequestMessagesItemContent + display_name: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/video_bots_page_request_messages_item_content.py b/src/gooey/types/video_bots_page_request_messages_item_content.py new file mode 100644 index 0000000..1c8efb4 --- /dev/null +++ b/src/gooey/types/video_bots_page_request_messages_item_content.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from .video_bots_page_request_messages_item_content_item import VideoBotsPageRequestMessagesItemContentItem + +VideoBotsPageRequestMessagesItemContent = typing.Union[str, typing.List[VideoBotsPageRequestMessagesItemContentItem]] diff --git a/src/gooey/types/video_bots_page_request_messages_item_content_item.py b/src/gooey/types/video_bots_page_request_messages_item_content_item.py new file mode 100644 index 0000000..b7235db --- /dev/null +++ b/src/gooey/types/video_bots_page_request_messages_item_content_item.py @@ -0,0 +1,41 @@ +# This file was auto-generated by Fern from our API Definition. + +from __future__ import annotations +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from ..core.pydantic_utilities import IS_PYDANTIC_V2 +import pydantic +from .image_url import ImageUrl + + +class VideoBotsPageRequestMessagesItemContentItem_Text(UniversalBaseModel): + type: typing.Literal["text"] = "text" + text: typing.Optional[str] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +class VideoBotsPageRequestMessagesItemContentItem_ImageUrl(UniversalBaseModel): + type: typing.Literal["image_url"] = "image_url" + image_url: typing.Optional[ImageUrl] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow + + +VideoBotsPageRequestMessagesItemContentItem = typing.Union[ + VideoBotsPageRequestMessagesItemContentItem_Text, VideoBotsPageRequestMessagesItemContentItem_ImageUrl +] diff --git a/src/gooey/types/video_bots_page_request_messages_item_role.py b/src/gooey/types/video_bots_page_request_messages_item_role.py new file mode 
100644 index 0000000..a2053c0 --- /dev/null +++ b/src/gooey/types/video_bots_page_request_messages_item_role.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +VideoBotsPageRequestMessagesItemRole = typing.Union[typing.Literal["user", "system", "assistant"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_tts_model.py b/src/gooey/types/video_bots_page_request_openai_tts_model.py similarity index 100% rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_tts_model.py rename to src/gooey/types/video_bots_page_request_openai_tts_model.py diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_voice_name.py b/src/gooey/types/video_bots_page_request_openai_voice_name.py similarity index 100% rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_openai_voice_name.py rename to src/gooey/types/video_bots_page_request_openai_voice_name.py diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_response_format_type.py b/src/gooey/types/video_bots_page_request_response_format_type.py similarity index 100% rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_response_format_type.py rename to src/gooey/types/video_bots_page_request_response_format_type.py diff --git a/src/gooey/types/video_bots_page_request_sadtalker_settings.py b/src/gooey/types/video_bots_page_request_sadtalker_settings.py new file mode 100644 index 0000000..6749388 --- /dev/null +++ b/src/gooey/types/video_bots_page_request_sadtalker_settings.py @@ -0,0 +1,40 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +from ..core.pydantic_utilities import UniversalBaseModel +import typing +from .video_bots_page_request_sadtalker_settings_preprocess import VideoBotsPageRequestSadtalkerSettingsPreprocess +import pydantic +from ..core.pydantic_utilities import IS_PYDANTIC_V2 + + +class VideoBotsPageRequestSadtalkerSettings(UniversalBaseModel): + still: typing.Optional[bool] = None + preprocess: typing.Optional[VideoBotsPageRequestSadtalkerSettingsPreprocess] = pydantic.Field(default=None) + """ + SadTalker only generates 512x512 output. 'crop' handles this by cropping the input to 512x512. 'resize' scales down the input to fit 512x512 and scales it back up after lipsyncing (does not work well for full person images, better for portraits). 'full' processes the cropped region and pastes it back into the original input. 'extcrop' and 'extfull' are similar to 'crop' and 'full' but with extended cropping. + """ + + pose_style: typing.Optional[int] = pydantic.Field(default=None) + """ + Random seed 0-45 inclusive that affects how the pose is animated. + """ + + expression_scale: typing.Optional[float] = pydantic.Field(default=None) + """ + Scale the amount of expression motion. 1.0 is normal, 0.5 is very reduced, and 2.0 is quite a lot. 
+ """ + + ref_eyeblink: typing.Optional[str] = None + ref_pose: typing.Optional[str] = None + input_yaw: typing.Optional[typing.List[int]] = None + input_pitch: typing.Optional[typing.List[int]] = None + input_roll: typing.Optional[typing.List[int]] = None + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/types/video_bots_page_request_sadtalker_settings_preprocess.py b/src/gooey/types/video_bots_page_request_sadtalker_settings_preprocess.py new file mode 100644 index 0000000..4a625ac --- /dev/null +++ b/src/gooey/types/video_bots_page_request_sadtalker_settings_preprocess.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +VideoBotsPageRequestSadtalkerSettingsPreprocess = typing.Union[ + typing.Literal["crop", "extcrop", "resize", "full", "extfull"], typing.Any +] diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_selected_model.py b/src/gooey/types/video_bots_page_request_selected_model.py similarity index 100% rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_selected_model.py rename to src/gooey/types/video_bots_page_request_selected_model.py diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_translation_model.py b/src/gooey/types/video_bots_page_request_translation_model.py similarity index 100% rename from src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_translation_model.py rename to src/gooey/types/video_bots_page_request_translation_model.py diff --git a/src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_tts_provider.py b/src/gooey/types/video_bots_page_request_tts_provider.py similarity index 100% rename from 
src/gooey/copilot_for_your_enterprise/types/video_bots_page_request_tts_provider.py rename to src/gooey/types/video_bots_page_request_tts_provider.py