From 8c6351a0215827dea34174978a78d06a528e6eef Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 6 May 2025 03:46:57 +0000 Subject: [PATCH 01/16] chore: use lazy imports for resources --- src/groq/_client.py | 315 +++++++++++++++++++++++++++++++++++--------- 1 file changed, 253 insertions(+), 62 deletions(-) diff --git a/src/groq/_client.py b/src/groq/_client.py index 6c7f997..ee203f0 100644 --- a/src/groq/_client.py +++ b/src/groq/_client.py @@ -3,7 +3,7 @@ from __future__ import annotations import os -from typing import Any, Union, Mapping +from typing import TYPE_CHECKING, Any, Union, Mapping from typing_extensions import Self, override import httpx @@ -20,8 +20,8 @@ RequestOptions, ) from ._utils import is_given, get_async_library +from ._compat import cached_property from ._version import __version__ -from .resources import files, models, batches, embeddings from ._streaming import Stream as Stream, AsyncStream as AsyncStream from ._exceptions import GroqError, APIStatusError from ._base_client import ( @@ -29,22 +29,20 @@ SyncAPIClient, AsyncAPIClient, ) -from .resources.chat import chat -from .resources.audio import audio + +if TYPE_CHECKING: + from .resources import chat, audio, files, models, batches, embeddings + from .resources.files import Files, AsyncFiles + from .resources.models import Models, AsyncModels + from .resources.batches import Batches, AsyncBatches + from .resources.chat.chat import Chat, AsyncChat + from .resources.embeddings import Embeddings, AsyncEmbeddings + from .resources.audio.audio import Audio, AsyncAudio __all__ = ["Timeout", "Transport", "ProxiesTypes", "RequestOptions", "Groq", "AsyncGroq", "Client", "AsyncClient"] class Groq(SyncAPIClient): - chat: chat.Chat - embeddings: embeddings.Embeddings - audio: audio.Audio - models: models.Models - batches: batches.Batches - files: files.Files - with_raw_response: GroqWithRawResponse - with_streaming_response: GroqWithStreamedResponse - # client options api_key: str @@ -99,14 +97,49 @@ def __init__( _strict_response_validation=_strict_response_validation, ) - self.chat = chat.Chat(self) - self.embeddings = embeddings.Embeddings(self) - self.audio = audio.Audio(self) - self.models = models.Models(self) - self.batches = batches.Batches(self) - self.files = files.Files(self) - self.with_raw_response = GroqWithRawResponse(self) - self.with_streaming_response = GroqWithStreamedResponse(self) + @cached_property + def chat(self) -> Chat: + from .resources.chat import Chat + + return Chat(self) + + @cached_property + def embeddings(self) -> Embeddings: + from .resources.embeddings import Embeddings + + return Embeddings(self) + + @cached_property + def audio(self) -> Audio: + from .resources.audio import Audio + + return Audio(self) + + @cached_property + def models(self) -> Models: + from .resources.models import Models + + return Models(self) + + @cached_property + def batches(self) -> Batches: + from .resources.batches import Batches + + return Batches(self) + + @cached_property + def files(self) -> Files: + from .resources.files import Files + + return Files(self) + + @cached_property + def with_raw_response(self) -> GroqWithRawResponse: + return GroqWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> GroqWithStreamedResponse: + return GroqWithStreamedResponse(self) @property @override @@ -214,15 +247,6 @@ def _make_status_error( class AsyncGroq(AsyncAPIClient): - chat: chat.AsyncChat - embeddings: 
embeddings.AsyncEmbeddings - audio: audio.AsyncAudio - models: models.AsyncModels - batches: batches.AsyncBatches - files: files.AsyncFiles - with_raw_response: AsyncGroqWithRawResponse - with_streaming_response: AsyncGroqWithStreamedResponse - # client options api_key: str @@ -277,14 +301,49 @@ def __init__( _strict_response_validation=_strict_response_validation, ) - self.chat = chat.AsyncChat(self) - self.embeddings = embeddings.AsyncEmbeddings(self) - self.audio = audio.AsyncAudio(self) - self.models = models.AsyncModels(self) - self.batches = batches.AsyncBatches(self) - self.files = files.AsyncFiles(self) - self.with_raw_response = AsyncGroqWithRawResponse(self) - self.with_streaming_response = AsyncGroqWithStreamedResponse(self) + @cached_property + def chat(self) -> AsyncChat: + from .resources.chat import AsyncChat + + return AsyncChat(self) + + @cached_property + def embeddings(self) -> AsyncEmbeddings: + from .resources.embeddings import AsyncEmbeddings + + return AsyncEmbeddings(self) + + @cached_property + def audio(self) -> AsyncAudio: + from .resources.audio import AsyncAudio + + return AsyncAudio(self) + + @cached_property + def models(self) -> AsyncModels: + from .resources.models import AsyncModels + + return AsyncModels(self) + + @cached_property + def batches(self) -> AsyncBatches: + from .resources.batches import AsyncBatches + + return AsyncBatches(self) + + @cached_property + def files(self) -> AsyncFiles: + from .resources.files import AsyncFiles + + return AsyncFiles(self) + + @cached_property + def with_raw_response(self) -> AsyncGroqWithRawResponse: + return AsyncGroqWithRawResponse(self) + + @cached_property + def with_streaming_response(self) -> AsyncGroqWithStreamedResponse: + return AsyncGroqWithStreamedResponse(self) @property @override @@ -392,43 +451,175 @@ def _make_status_error( class GroqWithRawResponse: + _client: Groq + def __init__(self, client: Groq) -> None: - self.chat = chat.ChatWithRawResponse(client.chat) - self.embeddings = embeddings.EmbeddingsWithRawResponse(client.embeddings) - self.audio = audio.AudioWithRawResponse(client.audio) - self.models = models.ModelsWithRawResponse(client.models) - self.batches = batches.BatchesWithRawResponse(client.batches) - self.files = files.FilesWithRawResponse(client.files) + self._client = client + + @cached_property + def chat(self) -> chat.ChatWithRawResponse: + from .resources.chat import ChatWithRawResponse + + return ChatWithRawResponse(self._client.chat) + + @cached_property + def embeddings(self) -> embeddings.EmbeddingsWithRawResponse: + from .resources.embeddings import EmbeddingsWithRawResponse + + return EmbeddingsWithRawResponse(self._client.embeddings) + + @cached_property + def audio(self) -> audio.AudioWithRawResponse: + from .resources.audio import AudioWithRawResponse + + return AudioWithRawResponse(self._client.audio) + + @cached_property + def models(self) -> models.ModelsWithRawResponse: + from .resources.models import ModelsWithRawResponse + + return ModelsWithRawResponse(self._client.models) + + @cached_property + def batches(self) -> batches.BatchesWithRawResponse: + from .resources.batches import BatchesWithRawResponse + + return BatchesWithRawResponse(self._client.batches) + + @cached_property + def files(self) -> files.FilesWithRawResponse: + from .resources.files import FilesWithRawResponse + + return FilesWithRawResponse(self._client.files) class AsyncGroqWithRawResponse: + _client: AsyncGroq + def __init__(self, client: AsyncGroq) -> None: - self.chat = 
chat.AsyncChatWithRawResponse(client.chat) - self.embeddings = embeddings.AsyncEmbeddingsWithRawResponse(client.embeddings) - self.audio = audio.AsyncAudioWithRawResponse(client.audio) - self.models = models.AsyncModelsWithRawResponse(client.models) - self.batches = batches.AsyncBatchesWithRawResponse(client.batches) - self.files = files.AsyncFilesWithRawResponse(client.files) + self._client = client + + @cached_property + def chat(self) -> chat.AsyncChatWithRawResponse: + from .resources.chat import AsyncChatWithRawResponse + + return AsyncChatWithRawResponse(self._client.chat) + + @cached_property + def embeddings(self) -> embeddings.AsyncEmbeddingsWithRawResponse: + from .resources.embeddings import AsyncEmbeddingsWithRawResponse + + return AsyncEmbeddingsWithRawResponse(self._client.embeddings) + + @cached_property + def audio(self) -> audio.AsyncAudioWithRawResponse: + from .resources.audio import AsyncAudioWithRawResponse + + return AsyncAudioWithRawResponse(self._client.audio) + + @cached_property + def models(self) -> models.AsyncModelsWithRawResponse: + from .resources.models import AsyncModelsWithRawResponse + + return AsyncModelsWithRawResponse(self._client.models) + + @cached_property + def batches(self) -> batches.AsyncBatchesWithRawResponse: + from .resources.batches import AsyncBatchesWithRawResponse + + return AsyncBatchesWithRawResponse(self._client.batches) + + @cached_property + def files(self) -> files.AsyncFilesWithRawResponse: + from .resources.files import AsyncFilesWithRawResponse + + return AsyncFilesWithRawResponse(self._client.files) class GroqWithStreamedResponse: + _client: Groq + def __init__(self, client: Groq) -> None: - self.chat = chat.ChatWithStreamingResponse(client.chat) - self.embeddings = embeddings.EmbeddingsWithStreamingResponse(client.embeddings) - self.audio = audio.AudioWithStreamingResponse(client.audio) - self.models = models.ModelsWithStreamingResponse(client.models) - self.batches = batches.BatchesWithStreamingResponse(client.batches) - self.files = files.FilesWithStreamingResponse(client.files) + self._client = client + + @cached_property + def chat(self) -> chat.ChatWithStreamingResponse: + from .resources.chat import ChatWithStreamingResponse + + return ChatWithStreamingResponse(self._client.chat) + + @cached_property + def embeddings(self) -> embeddings.EmbeddingsWithStreamingResponse: + from .resources.embeddings import EmbeddingsWithStreamingResponse + + return EmbeddingsWithStreamingResponse(self._client.embeddings) + + @cached_property + def audio(self) -> audio.AudioWithStreamingResponse: + from .resources.audio import AudioWithStreamingResponse + + return AudioWithStreamingResponse(self._client.audio) + + @cached_property + def models(self) -> models.ModelsWithStreamingResponse: + from .resources.models import ModelsWithStreamingResponse + + return ModelsWithStreamingResponse(self._client.models) + + @cached_property + def batches(self) -> batches.BatchesWithStreamingResponse: + from .resources.batches import BatchesWithStreamingResponse + + return BatchesWithStreamingResponse(self._client.batches) + + @cached_property + def files(self) -> files.FilesWithStreamingResponse: + from .resources.files import FilesWithStreamingResponse + + return FilesWithStreamingResponse(self._client.files) class AsyncGroqWithStreamedResponse: + _client: AsyncGroq + def __init__(self, client: AsyncGroq) -> None: - self.chat = chat.AsyncChatWithStreamingResponse(client.chat) - self.embeddings = 
embeddings.AsyncEmbeddingsWithStreamingResponse(client.embeddings) - self.audio = audio.AsyncAudioWithStreamingResponse(client.audio) - self.models = models.AsyncModelsWithStreamingResponse(client.models) - self.batches = batches.AsyncBatchesWithStreamingResponse(client.batches) - self.files = files.AsyncFilesWithStreamingResponse(client.files) + self._client = client + + @cached_property + def chat(self) -> chat.AsyncChatWithStreamingResponse: + from .resources.chat import AsyncChatWithStreamingResponse + + return AsyncChatWithStreamingResponse(self._client.chat) + + @cached_property + def embeddings(self) -> embeddings.AsyncEmbeddingsWithStreamingResponse: + from .resources.embeddings import AsyncEmbeddingsWithStreamingResponse + + return AsyncEmbeddingsWithStreamingResponse(self._client.embeddings) + + @cached_property + def audio(self) -> audio.AsyncAudioWithStreamingResponse: + from .resources.audio import AsyncAudioWithStreamingResponse + + return AsyncAudioWithStreamingResponse(self._client.audio) + + @cached_property + def models(self) -> models.AsyncModelsWithStreamingResponse: + from .resources.models import AsyncModelsWithStreamingResponse + + return AsyncModelsWithStreamingResponse(self._client.models) + + @cached_property + def batches(self) -> batches.AsyncBatchesWithStreamingResponse: + from .resources.batches import AsyncBatchesWithStreamingResponse + + return AsyncBatchesWithStreamingResponse(self._client.batches) + + @cached_property + def files(self) -> files.AsyncFilesWithStreamingResponse: + from .resources.files import AsyncFilesWithStreamingResponse + + return AsyncFilesWithStreamingResponse(self._client.files) Client = Groq From ce3c2514e2e66557de3e583532743cb3806032a2 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Tue, 6 May 2025 23:04:56 +0000 Subject: [PATCH 02/16] feat(api): api update --- .stats.yml | 4 +- README.md | 22 ------ src/groq/resources/chat/completions.py | 26 +++---- .../types/chat/completion_create_params.py | 75 ++++++++++++++++--- tests/api_resources/chat/test_completions.py | 4 +- 5 files changed, 82 insertions(+), 49 deletions(-) diff --git a/.stats.yml b/.stats.yml index 7a7d45c..47a711c 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 17 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-e4cd6fe4e6ac62707635fac8fb7d966a0360868e467b578ddd7cc04a9459ff26.yml -openapi_spec_hash: e618e809624bb2f3b36995638c3ba791 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-e08e435f65aeb7c4e773503f15f04eb488d2cf6ec4e122f406e32599eb2861f3.yml +openapi_spec_hash: ff68a1ae02aa5cabe33c0cc0379f4611 config_hash: 6b1c374dcc1ffa3165dd22f52a77ff89 diff --git a/README.md b/README.md index 4321c9c..a465d1a 100644 --- a/README.md +++ b/README.md @@ -89,28 +89,6 @@ Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typ Typed requests and responses provide autocomplete and documentation within your editor. If you would like to see type errors in VS Code to help catch bugs earlier, set `python.analysis.typeCheckingMode` to `basic`. 
-## Nested params - -Nested parameters are dictionaries, typed using `TypedDict`, for example: - -```python -from groq import Groq - -client = Groq() - -chat_completion = client.chat.completions.create( - messages=[ - { - "content": "content", - "role": "system", - } - ], - model="string", - response_format={"type": "json_object"}, -) -print(chat_completion.response_format) -``` - ## File uploads Request parameters that correspond to file uploads can be passed as `bytes`, or a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance or a tuple of `(filename, contents, media type)`. diff --git a/src/groq/resources/chat/completions.py b/src/groq/resources/chat/completions.py index 42576ea..bdabc10 100644 --- a/src/groq/resources/chat/completions.py +++ b/src/groq/resources/chat/completions.py @@ -283,13 +283,12 @@ def create( reasoning_format: Specifies how to output reasoning tokens - response_format: An object specifying the format that the model must output. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. + response_format: An object specifying the format that the model must output. Setting to + `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs + which ensures the model will match your supplied JSON schema. Setting to + `{ "type": "json_object" }` enables the older JSON mode, which ensures the + message the model generates is valid JSON. Using `json_schema` is preferred for + models that support it. seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return @@ -650,13 +649,12 @@ async def create( reasoning_format: Specifies how to output reasoning tokens - response_format: An object specifying the format that the model must output. - - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. + response_format: An object specifying the format that the model must output. Setting to + `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs + which ensures the model will match your supplied JSON schema. Setting to + `{ "type": "json_object" }` enables the older JSON mode, which ensures the + message the model generates is valid JSON. Using `json_schema` is preferred for + models that support it. 
seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return diff --git a/src/groq/types/chat/completion_create_params.py b/src/groq/types/chat/completion_create_params.py index 428fb5c..16cd6c9 100644 --- a/src/groq/types/chat/completion_create_params.py +++ b/src/groq/types/chat/completion_create_params.py @@ -11,7 +11,16 @@ from .chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam from .chat_completion_function_call_option_param import ChatCompletionFunctionCallOptionParam -__all__ = ["CompletionCreateParams", "FunctionCall", "Function", "ResponseFormat"] +__all__ = [ + "CompletionCreateParams", + "FunctionCall", + "Function", + "ResponseFormat", + "ResponseFormatResponseFormatText", + "ResponseFormatResponseFormatJsonSchema", + "ResponseFormatResponseFormatJsonSchemaJsonSchema", + "ResponseFormatResponseFormatJsonObject", +] class CompletionCreateParams(TypedDict, total=False): @@ -128,11 +137,11 @@ class CompletionCreateParams(TypedDict, total=False): response_format: Optional[ResponseFormat] """An object specifying the format that the model must output. - Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the - message the model generates is valid JSON. - - **Important:** when using JSON mode, you **must** also instruct the model to - produce JSON yourself via a system or user message. + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Setting to + `{ "type": "json_object" }` enables the older JSON mode, which ensures the + message the model generates is valid JSON. Using `json_schema` is preferred for + models that support it. """ seed: Optional[int] @@ -249,6 +258,54 @@ class Function(TypedDict, total=False): """ -class ResponseFormat(TypedDict, total=False): - type: Literal["text", "json_object"] - """Must be one of `text` or `json_object`.""" +class ResponseFormatResponseFormatText(TypedDict, total=False): + type: Required[Literal["text"]] + """The type of response format being defined. Always `text`.""" + + +class ResponseFormatResponseFormatJsonSchemaJsonSchema(TypedDict, total=False): + name: Required[str] + """The name of the response format. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + description: str + """ + A description of what the response format is for, used by the model to determine + how to respond in the format. + """ + + schema: Dict[str, object] + """ + The schema for the response format, described as a JSON Schema object. Learn how + to build JSON schemas [here](https://json-schema.org/). + """ + + strict: Optional[bool] + """Whether to enable strict schema adherence when generating the output. + + If set to true, the model will always follow the exact schema defined in the + `schema` field. Only a subset of JSON Schema is supported when `strict` is + `true`. To learn more, read the + [Structured Outputs guide](/docs/guides/structured-outputs). + """ + + +class ResponseFormatResponseFormatJsonSchema(TypedDict, total=False): + json_schema: Required[ResponseFormatResponseFormatJsonSchemaJsonSchema] + """Structured Outputs configuration options, including a JSON Schema.""" + + type: Required[Literal["json_schema"]] + """The type of response format being defined. 
Always `json_schema`.""" + + +class ResponseFormatResponseFormatJsonObject(TypedDict, total=False): + type: Required[Literal["json_object"]] + """The type of response format being defined. Always `json_object`.""" + + +ResponseFormat: TypeAlias = Union[ + ResponseFormatResponseFormatText, ResponseFormatResponseFormatJsonSchema, ResponseFormatResponseFormatJsonObject +] diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index 0f65ce0..d1380f3 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -61,7 +61,7 @@ def test_method_create_with_all_params(self, client: Groq) -> None: parallel_tool_calls=True, presence_penalty=-2, reasoning_format="hidden", - response_format={"type": "json_object"}, + response_format={"type": "text"}, seed=0, service_tier="auto", stop="\n", @@ -169,7 +169,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGroq) -> N parallel_tool_calls=True, presence_penalty=-2, reasoning_format="hidden", - response_format={"type": "json_object"}, + response_format={"type": "text"}, seed=0, service_tier="auto", stop="\n", From 7a04964d964feaac9dfcf1f46dad1f63a5acf714 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 9 May 2025 04:24:20 +0000 Subject: [PATCH 03/16] chore(internal): avoid errors for isinstance checks on proxies --- src/groq/_utils/_proxy.py | 5 ++++- tests/test_utils/test_proxy.py | 11 +++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/src/groq/_utils/_proxy.py b/src/groq/_utils/_proxy.py index ffd883e..0f239a3 100644 --- a/src/groq/_utils/_proxy.py +++ b/src/groq/_utils/_proxy.py @@ -46,7 +46,10 @@ def __dir__(self) -> Iterable[str]: @property # type: ignore @override def __class__(self) -> type: # pyright: ignore - proxied = self.__get_proxied__() + try: + proxied = self.__get_proxied__() + except Exception: + return type(self) if issubclass(type(proxied), LazyProxy): return type(proxied) return proxied.__class__ diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py index 2fa9c4a..4c48c14 100644 --- a/tests/test_utils/test_proxy.py +++ b/tests/test_utils/test_proxy.py @@ -21,3 +21,14 @@ def test_recursive_proxy() -> None: assert dir(proxy) == [] assert type(proxy).__name__ == "RecursiveLazyProxy" assert type(operator.attrgetter("name.foo.bar.baz")(proxy)).__name__ == "RecursiveLazyProxy" + + +def test_isinstance_does_not_error() -> None: + class AlwaysErrorProxy(LazyProxy[Any]): + @override + def __load__(self) -> Any: + raise RuntimeError("Mocking missing dependency") + + proxy = AlwaysErrorProxy() + assert not isinstance(proxy, dict) + assert isinstance(proxy, LazyProxy) From 4dc027a35054db11eec6c8bfe1fd463b0c49db6e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 9 May 2025 04:36:09 +0000 Subject: [PATCH 04/16] docs: remove or fix invalid readme examples --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index a465d1a..e37bf22 100644 --- a/README.md +++ b/README.md @@ -40,7 +40,7 @@ chat_completion = client.chat.completions.create( ], model="llama3-8b-8192", ) -print(chat_completion.choices[0].message.content) +print(chat_completion.id) ``` While you can provide an `api_key` keyword argument, @@ -72,7 +72,7 @@ async def main() -> None: ], model="llama3-8b-8192", ) - 
print(chat_completion.choices[0].message.content) + print(chat_completion.id) asyncio.run(main()) From 3ee87792c92196abba0a1c7d9400a34d95c58895 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Sat, 10 May 2025 03:43:36 +0000 Subject: [PATCH 05/16] fix(package): support direct resource imports --- src/groq/__init__.py | 5 +++++ src/groq/_utils/_resources_proxy.py | 24 ++++++++++++++++++++++++ 2 files changed, 29 insertions(+) create mode 100644 src/groq/_utils/_resources_proxy.py diff --git a/src/groq/__init__.py b/src/groq/__init__.py index 07f77a6..9ad2b0e 100644 --- a/src/groq/__init__.py +++ b/src/groq/__init__.py @@ -1,5 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. +import typing as _t + from . import types from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes from ._utils import file_from_path @@ -68,6 +70,9 @@ "DefaultAsyncHttpxClient", ] +if not _t.TYPE_CHECKING: + from ._utils._resources_proxy import resources as resources + _setup_logging() # Update the __module__ attribute for exported symbols so that diff --git a/src/groq/_utils/_resources_proxy.py b/src/groq/_utils/_resources_proxy.py new file mode 100644 index 0000000..4c4c876 --- /dev/null +++ b/src/groq/_utils/_resources_proxy.py @@ -0,0 +1,24 @@ +from __future__ import annotations + +from typing import Any +from typing_extensions import override + +from ._proxy import LazyProxy + + +class ResourcesProxy(LazyProxy[Any]): + """A proxy for the `groq.resources` module. + + This is used so that we can lazily import `groq.resources` only when + needed *and* so that users can just import `groq` and reference `groq.resources` + """ + + @override + def __load__(self) -> Any: + import importlib + + mod = importlib.import_module("groq.resources") + return mod + + +resources = ResourcesProxy().__as_proxied__() From b55a9e8d4e6fa56b537e01883be8d7ea7ce37032 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 14 May 2025 15:26:38 +0000 Subject: [PATCH 06/16] codegen metadata --- .stats.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index 47a711c..e93b2ac 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 17 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-e08e435f65aeb7c4e773503f15f04eb488d2cf6ec4e122f406e32599eb2861f3.yml -openapi_spec_hash: ff68a1ae02aa5cabe33c0cc0379f4611 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-321ea3c10bc0eb2b17407a99eb47e8ea88e67bf7bff8bebe3592fbd4f73b1f89.yml +openapi_spec_hash: d0f5f934d8a12f79db0cbdb7b6b3d0e3 config_hash: 6b1c374dcc1ffa3165dd22f52a77ff89 From 4bb39e59bbd317de95b3e1d896ab83ee41bc7b3e Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 15 May 2025 01:16:11 +0000 Subject: [PATCH 07/16] feat(api): api update --- .stats.yml | 4 ++-- src/groq/resources/chat/completions.py | 6 ++++-- src/groq/types/chat/completion_create_params.py | 3 ++- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.stats.yml b/.stats.yml index e93b2ac..a620388 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 17 -openapi_spec_url: 
https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-321ea3c10bc0eb2b17407a99eb47e8ea88e67bf7bff8bebe3592fbd4f73b1f89.yml -openapi_spec_hash: d0f5f934d8a12f79db0cbdb7b6b3d0e3 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-98337e5b33a6b805acdfcd318f6acab8683c5b0afb1446cd0c62dff125fad4c0.yml +openapi_spec_hash: c4ac337673fc0f2bab417fbf379776ee config_hash: 6b1c374dcc1ffa3165dd22f52a77ff89 diff --git a/src/groq/resources/chat/completions.py b/src/groq/resources/chat/completions.py index bdabc10..2d80b43 100644 --- a/src/groq/resources/chat/completions.py +++ b/src/groq/resources/chat/completions.py @@ -285,7 +285,8 @@ def create( response_format: An object specifying the format that the model must output. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs - which ensures the model will match your supplied JSON schema. Setting to + which ensures the model will match your supplied JSON schema. json_schema + response format is only supported on llama 4 models. Setting to `{ "type": "json_object" }` enables the older JSON mode, which ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. @@ -651,7 +652,8 @@ async def create( response_format: An object specifying the format that the model must output. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured Outputs - which ensures the model will match your supplied JSON schema. Setting to + which ensures the model will match your supplied JSON schema. json_schema + response format is only supported on llama 4 models. Setting to `{ "type": "json_object" }` enables the older JSON mode, which ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. diff --git a/src/groq/types/chat/completion_create_params.py b/src/groq/types/chat/completion_create_params.py index 16cd6c9..ecd64df 100644 --- a/src/groq/types/chat/completion_create_params.py +++ b/src/groq/types/chat/completion_create_params.py @@ -138,7 +138,8 @@ class CompletionCreateParams(TypedDict, total=False): """An object specifying the format that the model must output. Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured - Outputs which ensures the model will match your supplied JSON schema. Setting to + Outputs which ensures the model will match your supplied JSON schema. + json_schema response format is only supported on llama 4 models. Setting to `{ "type": "json_object" }` enables the older JSON mode, which ensures the message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. 
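
Taken together, [PATCH 02/16] and [PATCH 07/16] replace the old two-value `ResponseFormat` TypedDict with a tagged union (`text`, `json_schema`, `json_object`) and document the new Structured Outputs mode. A minimal sketch of a call exercising the `json_schema` variant follows; the schema name, fields, and prompt are illustrative assumptions, and the model ID is simply one llama 4 model, since the docstrings above note that `json_schema` is only supported on llama 4 models:

```python
from groq import Groq

client = Groq()

# Structured Outputs: the generated message must conform to the supplied
# JSON Schema. The dict below mirrors the shape of the
# ResponseFormatResponseFormatJsonSchema TypedDict added in
# completion_create_params.py.
chat_completion = client.chat.completions.create(
    messages=[{"role": "user", "content": "Name a planet and its diameter in km."}],
    model="meta-llama/llama-4-scout-17b-16e-instruct",  # assumed llama 4 model
    response_format={
        "type": "json_schema",
        "json_schema": {
            "name": "planet_info",  # a-z, A-Z, 0-9, underscores, dashes; max 64 chars
            "schema": {
                "type": "object",
                "properties": {
                    "name": {"type": "string"},
                    "diameter_km": {"type": "number"},
                },
                "required": ["name", "diameter_km"],
            },
            "strict": True,  # enforce exact schema adherence (subset of JSON Schema)
        },
    },
)
print(chat_completion.choices[0].message.content)  # valid JSON matching the schema
```

Older models can keep passing `{"type": "json_object"}` for plain JSON mode, which remains a member of the union.
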
From 1f464f84bee361e07214e6b65a6727a1b5e0bc7b Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 15 May 2025 05:07:15 +0000 Subject: [PATCH 08/16] chore(ci): upload sdks to package manager --- .github/workflows/ci.yml | 24 ++++++++++++++++++++++++ scripts/utils/upload-artifact.sh | 25 +++++++++++++++++++++++++ 2 files changed, 49 insertions(+) create mode 100755 scripts/utils/upload-artifact.sh diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f038932..d66d8f9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -30,6 +30,30 @@ jobs: - name: Run lints run: ./scripts/lint + upload: + if: github.repository == 'stainless-sdks/groqcloud-python' + timeout-minutes: 10 + name: upload + permissions: + contents: read + id-token: write + runs-on: depot-ubuntu-24.04 + steps: + - uses: actions/checkout@v4 + + - name: Get GitHub OIDC Token + id: github-oidc + uses: actions/github-script@v6 + with: + script: core.setOutput('github_token', await core.getIDToken()); + + - name: Upload tarball + env: + URL: https://pkg.stainless.com/s + AUTH: ${{ steps.github-oidc.outputs.github_token }} + SHA: ${{ github.sha }} + run: ./scripts/utils/upload-artifact.sh + test: timeout-minutes: 10 name: test diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh new file mode 100755 index 0000000..0fc13cb --- /dev/null +++ b/scripts/utils/upload-artifact.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -exuo pipefail + +RESPONSE=$(curl -X POST "$URL" \ + -H "Authorization: Bearer $AUTH" \ + -H "Content-Type: application/json") + +SIGNED_URL=$(echo "$RESPONSE" | jq -r '.url') + +if [[ "$SIGNED_URL" == "null" ]]; then + echo -e "\033[31mFailed to get signed URL.\033[0m" + exit 1 +fi + +UPLOAD_RESPONSE=$(tar -cz . 
| curl -v -X PUT \ + -H "Content-Type: application/gzip" \ + --data-binary @- "$SIGNED_URL" 2>&1) + +if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then + echo -e "\033[32mUploaded build to Stainless storage.\033[0m" + echo -e "\033[32mInstallation: npm install 'https://pkg.stainless.com/s/groqcloud-python/$SHA'\033[0m" +else + echo -e "\033[31mFailed to upload artifact.\033[0m" + exit 1 +fi From 3faa8c2bc10e74870c8a7c56087a423893b15370 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 15 May 2025 19:12:43 +0000 Subject: [PATCH 09/16] codegen metadata --- .stats.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index a620388..d0bb5a6 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 17 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-98337e5b33a6b805acdfcd318f6acab8683c5b0afb1446cd0c62dff125fad4c0.yml -openapi_spec_hash: c4ac337673fc0f2bab417fbf379776ee +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-1abe4e4e0313104a301ca3d94468647f1d0c020e43846c2b68871bdb01ab3eff.yml +openapi_spec_hash: 6c2114ab29431aed0dd7a07fdda6d431 config_hash: 6b1c374dcc1ffa3165dd22f52a77ff89 From efc8462d6069d40369ea4028b79aee8f1ec02499 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 15 May 2025 20:33:42 +0000 Subject: [PATCH 10/16] codegen metadata --- .stats.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.stats.yml b/.stats.yml index d0bb5a6..81a3906 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 17 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-1abe4e4e0313104a301ca3d94468647f1d0c020e43846c2b68871bdb01ab3eff.yml -openapi_spec_hash: 6c2114ab29431aed0dd7a07fdda6d431 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-2d749f836cb33a7f7206339104ccd35faeb0da880beb2a8743bec67eefc55095.yml +openapi_spec_hash: 6262e815941321bfe347d1537fac25d0 config_hash: 6b1c374dcc1ffa3165dd22f52a77ff89 From 454ff60a12d7e16127d87ef9c152b9e4a394246d Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 May 2025 03:53:40 +0000 Subject: [PATCH 11/16] chore(ci): fix installation instructions --- scripts/utils/upload-artifact.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh index 0fc13cb..7da1e1b 100755 --- a/scripts/utils/upload-artifact.sh +++ b/scripts/utils/upload-artifact.sh @@ -18,7 +18,7 @@ UPLOAD_RESPONSE=$(tar -cz . 
| curl -v -X PUT \
 
 if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then
   echo -e "\033[32mUploaded build to Stainless storage.\033[0m"
-  echo -e "\033[32mInstallation: npm install 'https://pkg.stainless.com/s/groqcloud-python/$SHA'\033[0m"
+  echo -e "\033[32mInstallation: pip install 'https://pkg.stainless.com/s/groqcloud-python/$SHA'\033[0m"
 else
   echo -e "\033[31mFailed to upload artifact.\033[0m"
   exit 1

From e3acb618e09864829089575c51ec617b2f4b6975 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 16 May 2025 04:32:06 +0000
Subject: [PATCH 12/16] feat(api): api update

---
 .stats.yml                                   |  4 +--
 README.md                                    | 26 +++++++++++++++
 src/groq/resources/chat/completions.py       | 24 +++++++++-----
 .../types/chat/chat_completion_message.py    | 33 ++++++++++++++++++-
 .../types/chat/completion_create_params.py   | 23 ++++++++++---
 tests/api_resources/chat/test_completions.py | 10 ++++++
 6 files changed, 105 insertions(+), 15 deletions(-)

diff --git a/.stats.yml b/.stats.yml
index 81a3906..3a552aa 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 17
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-2d749f836cb33a7f7206339104ccd35faeb0da880beb2a8743bec67eefc55095.yml
-openapi_spec_hash: 6262e815941321bfe347d1537fac25d0
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-66cde2321f98e4359dd3df749ddaf02abf371b19b126f1defa314d18f178c88d.yml
+openapi_spec_hash: aa85981d19627dff8b175a7fd60d575d
 config_hash: 6b1c374dcc1ffa3165dd22f52a77ff89

diff --git a/README.md b/README.md
index e37bf22..0938430 100644
--- a/README.md
+++ b/README.md
@@ -89,6 +89,32 @@ Nested request parameters are [TypedDicts](https://docs.python.org/3/library/typ
 
 Typed requests and responses provide autocomplete and documentation within your editor. If you would like to see type errors in VS Code to help catch bugs earlier, set `python.analysis.typeCheckingMode` to `basic`.
 
+## Nested params
+
+Nested parameters are dictionaries, typed using `TypedDict`, for example:
+
+```python
+from groq import Groq
+
+client = Groq()
+
+chat_completion = client.chat.completions.create(
+    messages=[
+        {
+            "content": "content",
+            "role": "system",
+        }
+    ],
+    model="string",
+    search_settings={
+        "exclude_domains": ["string"],
+        "include_domains": ["string"],
+        "include_images": True,
+    },
+)
+print(chat_completion.id)
+```
+
 ## File uploads
 
 Request parameters that correspond to file uploads can be passed as `bytes`, or a [`PathLike`](https://docs.python.org/3/library/os.html#os.PathLike) instance or a tuple of `(filename, contents, media type)`. 
diff --git a/src/groq/resources/chat/completions.py b/src/groq/resources/chat/completions.py index 2d80b43..f6f69d6 100644 --- a/src/groq/resources/chat/completions.py +++ b/src/groq/resources/chat/completions.py @@ -202,6 +202,7 @@ def create( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, reasoning_format: Optional[Literal["hidden", "raw", "parsed"]] | NotGiven = NOT_GIVEN, response_format: Optional[completion_create_params.ResponseFormat] | NotGiven = NOT_GIVEN, + search_settings: Optional[completion_create_params.SearchSettings] | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "on_demand", "flex"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, @@ -229,8 +230,8 @@ def create( model: ID of the model to use. For details on which models are compatible with the Chat API, see available [models](https://console.groq.com/docs/models) - exclude_domains: A list of domains to exclude from the search results when the model uses a web - search tool. + exclude_domains: Deprecated: Use search_settings.exclude_domains instead. A list of domains to + exclude from the search results when the model uses a web search tool. frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to @@ -251,8 +252,8 @@ def create( A list of functions the model may generate JSON inputs for. - include_domains: A list of domains to include in the search results when the model uses a web - search tool. + include_domains: Deprecated: Use search_settings.include_domains instead. A list of domains to + include in the search results when the model uses a web search tool. logit_bias: This is not yet supported by any of our models. Modify the likelihood of specified tokens appearing in the completion. @@ -291,6 +292,8 @@ def create( message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + search_settings: Settings for web search functionality when the model uses a web search tool. + seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the @@ -373,6 +376,7 @@ def create( "presence_penalty": presence_penalty, "reasoning_format": reasoning_format, "response_format": response_format, + "search_settings": search_settings, "seed": seed, "service_tier": service_tier, "stop": stop, @@ -569,6 +573,7 @@ async def create( presence_penalty: Optional[float] | NotGiven = NOT_GIVEN, reasoning_format: Optional[Literal["hidden", "raw", "parsed"]] | NotGiven = NOT_GIVEN, response_format: Optional[completion_create_params.ResponseFormat] | NotGiven = NOT_GIVEN, + search_settings: Optional[completion_create_params.SearchSettings] | NotGiven = NOT_GIVEN, seed: Optional[int] | NotGiven = NOT_GIVEN, service_tier: Optional[Literal["auto", "on_demand", "flex"]] | NotGiven = NOT_GIVEN, stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN, @@ -596,8 +601,8 @@ async def create( model: ID of the model to use. For details on which models are compatible with the Chat API, see available [models](https://console.groq.com/docs/models) - exclude_domains: A list of domains to exclude from the search results when the model uses a web - search tool. 
+ exclude_domains: Deprecated: Use search_settings.exclude_domains instead. A list of domains to + exclude from the search results when the model uses a web search tool. frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to @@ -618,8 +623,8 @@ async def create( A list of functions the model may generate JSON inputs for. - include_domains: A list of domains to include in the search results when the model uses a web - search tool. + include_domains: Deprecated: Use search_settings.include_domains instead. A list of domains to + include in the search results when the model uses a web search tool. logit_bias: This is not yet supported by any of our models. Modify the likelihood of specified tokens appearing in the completion. @@ -658,6 +663,8 @@ async def create( message the model generates is valid JSON. Using `json_schema` is preferred for models that support it. + search_settings: Settings for web search functionality when the model uses a web search tool. + seed: If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. Determinism is not guaranteed, and you should refer to the @@ -740,6 +747,7 @@ async def create( "presence_penalty": presence_penalty, "reasoning_format": reasoning_format, "response_format": response_format, + "search_settings": search_settings, "seed": seed, "service_tier": service_tier, "stop": stop, diff --git a/src/groq/types/chat/chat_completion_message.py b/src/groq/types/chat/chat_completion_message.py index 4212e1d..fa1c55f 100644 --- a/src/groq/types/chat/chat_completion_message.py +++ b/src/groq/types/chat/chat_completion_message.py @@ -6,7 +6,35 @@ from ..._models import BaseModel from .chat_completion_message_tool_call import ChatCompletionMessageToolCall -__all__ = ["ChatCompletionMessage", "ExecutedTool", "FunctionCall"] +__all__ = [ + "ChatCompletionMessage", + "ExecutedTool", + "ExecutedToolSearchResults", + "ExecutedToolSearchResultsResult", + "FunctionCall", +] + + +class ExecutedToolSearchResultsResult(BaseModel): + content: Optional[str] = None + """The content of the search result""" + + score: Optional[float] = None + """The relevance score of the search result""" + + title: Optional[str] = None + """The title of the search result""" + + url: Optional[str] = None + """The URL of the search result""" + + +class ExecutedToolSearchResults(BaseModel): + images: Optional[List[str]] = None + """List of image URLs returned by the search""" + + results: Optional[List[ExecutedToolSearchResultsResult]] = None + """List of search results""" class ExecutedTool(BaseModel): @@ -22,6 +50,9 @@ class ExecutedTool(BaseModel): output: Optional[str] = None """The output returned by the tool.""" + search_results: Optional[ExecutedToolSearchResults] = None + """The search results returned by the tool, if applicable.""" + class FunctionCall(BaseModel): arguments: str diff --git a/src/groq/types/chat/completion_create_params.py b/src/groq/types/chat/completion_create_params.py index ecd64df..c563858 100644 --- a/src/groq/types/chat/completion_create_params.py +++ b/src/groq/types/chat/completion_create_params.py @@ -20,6 +20,7 @@ "ResponseFormatResponseFormatJsonSchema", "ResponseFormatResponseFormatJsonSchemaJsonSchema", "ResponseFormatResponseFormatJsonObject", + "SearchSettings", ] @@ -48,8 +49,8 @@ class CompletionCreateParams(TypedDict, 
total=False): exclude_domains: Optional[List[str]] """ - A list of domains to exclude from the search results when the model uses a web - search tool. + Deprecated: Use search_settings.exclude_domains instead. A list of domains to + exclude from the search results when the model uses a web search tool. """ frequency_penalty: Optional[float] @@ -80,8 +81,8 @@ class CompletionCreateParams(TypedDict, total=False): include_domains: Optional[List[str]] """ - A list of domains to include in the search results when the model uses a web - search tool. + Deprecated: Use search_settings.include_domains instead. A list of domains to + include in the search results when the model uses a web search tool. """ logit_bias: Optional[Dict[str, int]] @@ -145,6 +146,9 @@ class CompletionCreateParams(TypedDict, total=False): models that support it. """ + search_settings: Optional[SearchSettings] + """Settings for web search functionality when the model uses a web search tool.""" + seed: Optional[int] """ If specified, our system will make a best effort to sample deterministically, @@ -310,3 +314,14 @@ class ResponseFormatResponseFormatJsonObject(TypedDict, total=False): ResponseFormat: TypeAlias = Union[ ResponseFormatResponseFormatText, ResponseFormatResponseFormatJsonSchema, ResponseFormatResponseFormatJsonObject ] + + +class SearchSettings(TypedDict, total=False): + exclude_domains: Optional[List[str]] + """A list of domains to exclude from the search results.""" + + include_domains: Optional[List[str]] + """A list of domains to include in the search results.""" + + include_images: Optional[bool] + """Whether to include images in the search results.""" diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index d1380f3..5b3b03d 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -62,6 +62,11 @@ def test_method_create_with_all_params(self, client: Groq) -> None: presence_penalty=-2, reasoning_format="hidden", response_format={"type": "text"}, + search_settings={ + "exclude_domains": ["string"], + "include_domains": ["string"], + "include_images": True, + }, seed=0, service_tier="auto", stop="\n", @@ -170,6 +175,11 @@ async def test_method_create_with_all_params(self, async_client: AsyncGroq) -> N presence_penalty=-2, reasoning_format="hidden", response_format={"type": "text"}, + search_settings={ + "exclude_domains": ["string"], + "include_domains": ["string"], + "include_images": True, + }, seed=0, service_tier="auto", stop="\n", From 385969a478242c67dd07e510632ea1f168f743d8 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 May 2025 19:39:33 +0000 Subject: [PATCH 13/16] feat(api): api update --- .stats.yml | 4 +- README.md | 4 +- src/groq/resources/audio/speech.py | 5 +- src/groq/resources/audio/transcriptions.py | 10 ++-- src/groq/resources/audio/translations.py | 10 ++-- src/groq/types/audio/speech_create_params.py | 3 +- .../audio/transcription_create_params.py | 7 ++- .../types/audio/translation_create_params.py | 7 ++- tests/api_resources/audio/test_speech.py | 48 +++++++++---------- .../audio/test_transcriptions.py | 16 +++---- .../api_resources/audio/test_translations.py | 16 +++---- tests/api_resources/chat/test_completions.py | 16 +++---- tests/test_client.py | 12 ++--- 13 files changed, 85 insertions(+), 73 deletions(-) diff --git a/.stats.yml b/.stats.yml index 3a552aa..d394724 100644 --- a/.stats.yml +++ 
b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 17 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-66cde2321f98e4359dd3df749ddaf02abf371b19b126f1defa314d18f178c88d.yml -openapi_spec_hash: aa85981d19627dff8b175a7fd60d575d +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/groqcloud%2Fgroqcloud-ee34f94100e35d728e92c54940b84a46f420f476a4b82a33a21728ebf1e9032f.yml +openapi_spec_hash: 5d642c8432d9963281e7db786c2b4e6c config_hash: 6b1c374dcc1ffa3165dd22f52a77ff89 diff --git a/README.md b/README.md index 0938430..8b7582c 100644 --- a/README.md +++ b/README.md @@ -105,7 +105,7 @@ chat_completion = client.chat.completions.create( "role": "system", } ], - model="string", + model="meta-llama/llama-4-scout-17b-16e-instruct", search_settings={ "exclude_domains": ["string"], "include_domains": ["string"], @@ -126,7 +126,7 @@ from groq import Groq client = Groq() client.audio.transcriptions.create( - model="whisper-large-v3", + model="whisper-large-v3-turbo", file=Path("/path/to/file"), ) ``` diff --git a/src/groq/resources/audio/speech.py b/src/groq/resources/audio/speech.py index 0c3b639..8a842c2 100644 --- a/src/groq/resources/audio/speech.py +++ b/src/groq/resources/audio/speech.py @@ -2,6 +2,7 @@ from __future__ import annotations +from typing import Union from typing_extensions import Literal import httpx @@ -50,7 +51,7 @@ def create( self, *, input: str, - model: str, + model: Union[str, Literal["playai-tts", "playai-tts-arabic"]], voice: str, response_format: Literal["wav", "mp3"] | NotGiven = NOT_GIVEN, speed: float | NotGiven = NOT_GIVEN, @@ -128,7 +129,7 @@ async def create( self, *, input: str, - model: str, + model: Union[str, Literal["playai-tts", "playai-tts-arabic"]], voice: str, response_format: Literal["wav", "mp3"] | NotGiven = NOT_GIVEN, speed: float | NotGiven = NOT_GIVEN, diff --git a/src/groq/resources/audio/transcriptions.py b/src/groq/resources/audio/transcriptions.py index ee2740f..a21683a 100644 --- a/src/groq/resources/audio/transcriptions.py +++ b/src/groq/resources/audio/transcriptions.py @@ -47,7 +47,7 @@ def with_streaming_response(self) -> TranscriptionsWithStreamingResponse: def create( self, *, - model: Union[str, Literal["whisper-large-v3"]], + model: Union[str, Literal["whisper-large-v3", "whisper-large-v3-turbo"]], file: FileTypes | NotGiven = NOT_GIVEN, language: Union[ str, @@ -171,7 +171,8 @@ def create( Transcribes audio into the input language. Args: - model: ID of the model to use. Only `whisper-large-v3` is currently available. + model: ID of the model to use. `whisper-large-v3` and `whisper-large-v3-turbo` are + currently available. file: The audio file object (not file name) to transcribe, in one of these formats: @@ -263,7 +264,7 @@ def with_streaming_response(self) -> AsyncTranscriptionsWithStreamingResponse: async def create( self, *, - model: Union[str, Literal["whisper-large-v3"]], + model: Union[str, Literal["whisper-large-v3", "whisper-large-v3-turbo"]], file: FileTypes | NotGiven = NOT_GIVEN, language: Union[ str, @@ -387,7 +388,8 @@ async def create( Transcribes audio into the input language. Args: - model: ID of the model to use. Only `whisper-large-v3` is currently available. + model: ID of the model to use. `whisper-large-v3` and `whisper-large-v3-turbo` are + currently available. 
file: The audio file object (not file name) to transcribe, in one of these formats: diff --git a/src/groq/resources/audio/translations.py b/src/groq/resources/audio/translations.py index d422676..ac63c29 100644 --- a/src/groq/resources/audio/translations.py +++ b/src/groq/resources/audio/translations.py @@ -47,7 +47,7 @@ def with_streaming_response(self) -> TranslationsWithStreamingResponse: def create( self, *, - model: Union[str, Literal["whisper-large-v3"]], + model: Union[str, Literal["whisper-large-v3", "whisper-large-v3-turbo"]], file: FileTypes | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, response_format: Literal["json", "text", "verbose_json"] | NotGiven = NOT_GIVEN, @@ -65,7 +65,8 @@ def create( Args: model: ID of the model to use. - Only `whisper-large-v3` is currently available. + `whisper-large-v3` and `whisper-large-v3-turbo` are + currently available. file: The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. @@ -143,7 +144,7 @@ def with_streaming_response(self) -> AsyncTranslationsWithStreamingResponse: async def create( self, *, - model: Union[str, Literal["whisper-large-v3"]], + model: Union[str, Literal["whisper-large-v3", "whisper-large-v3-turbo"]], file: FileTypes | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, response_format: Literal["json", "text", "verbose_json"] | NotGiven = NOT_GIVEN, @@ -161,7 +162,8 @@ async def create( Args: model: ID of the model to use. - Only `whisper-large-v3` is currently available. + `whisper-large-v3` and `whisper-large-v3-turbo` are + currently available. file: The audio file object (not file name) translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. diff --git a/src/groq/types/audio/speech_create_params.py b/src/groq/types/audio/speech_create_params.py index 8972082..ff31e03 100644 --- a/src/groq/types/audio/speech_create_params.py +++ b/src/groq/types/audio/speech_create_params.py @@ -2,6 +2,7 @@ from __future__ import annotations +from typing import Union from typing_extensions import Literal, Required, TypedDict __all__ = ["SpeechCreateParams"] @@ -11,7 +12,7 @@ class SpeechCreateParams(TypedDict, total=False): input: Required[str] """The text to generate audio for.""" - model: Required[str] + model: Required[Union[str, Literal["playai-tts", "playai-tts-arabic"]]] """One of the [available TTS models](/docs/text-to-speech).""" voice: Required[str] diff --git a/src/groq/types/audio/transcription_create_params.py b/src/groq/types/audio/transcription_create_params.py index 4752f2d..1837470 100644 --- a/src/groq/types/audio/transcription_create_params.py +++ b/src/groq/types/audio/transcription_create_params.py @@ -11,8 +11,11 @@ class TranscriptionCreateParams(TypedDict, total=False): - model: Required[Union[str, Literal["whisper-large-v3"]]] - """ID of the model to use. Only `whisper-large-v3` is currently available.""" + model: Required[Union[str, Literal["whisper-large-v3", "whisper-large-v3-turbo"]]] + """ID of the model to use. + + `whisper-large-v3` and `whisper-large-v3-turbo` are currently available. 
+ """ file: FileTypes """ diff --git a/src/groq/types/audio/translation_create_params.py b/src/groq/types/audio/translation_create_params.py index 7ee76e2..c5894cc 100644 --- a/src/groq/types/audio/translation_create_params.py +++ b/src/groq/types/audio/translation_create_params.py @@ -11,8 +11,11 @@ class TranslationCreateParams(TypedDict, total=False): - model: Required[Union[str, Literal["whisper-large-v3"]]] - """ID of the model to use. Only `whisper-large-v3` is currently available.""" + model: Required[Union[str, Literal["whisper-large-v3", "whisper-large-v3-turbo"]]] + """ID of the model to use. + + `whisper-large-v3` and `whisper-large-v3-turbo` are currently available. + """ file: FileTypes """ diff --git a/tests/api_resources/audio/test_speech.py b/tests/api_resources/audio/test_speech.py index d2fa986..ee22ed6 100644 --- a/tests/api_resources/audio/test_speech.py +++ b/tests/api_resources/audio/test_speech.py @@ -28,9 +28,9 @@ class TestSpeech: def test_method_create(self, client: Groq, respx_mock: MockRouter) -> None: respx_mock.post("/openai/v1/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = client.audio.speech.create( - input="input", - model="model", - voice="voice", + input="The quick brown fox jumped over the lazy dog", + model="playai-tts", + voice="Fritz-PlayAI", ) assert speech.is_closed assert speech.json() == {"foo": "bar"} @@ -42,9 +42,9 @@ def test_method_create(self, client: Groq, respx_mock: MockRouter) -> None: def test_method_create_with_all_params(self, client: Groq, respx_mock: MockRouter) -> None: respx_mock.post("/openai/v1/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = client.audio.speech.create( - input="input", - model="model", - voice="voice", + input="The quick brown fox jumped over the lazy dog", + model="playai-tts", + voice="Fritz-PlayAI", response_format="wav", speed=1, ) @@ -59,9 +59,9 @@ def test_raw_response_create(self, client: Groq, respx_mock: MockRouter) -> None respx_mock.post("/openai/v1/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = client.audio.speech.with_raw_response.create( - input="input", - model="model", - voice="voice", + input="The quick brown fox jumped over the lazy dog", + model="playai-tts", + voice="Fritz-PlayAI", ) assert speech.is_closed is True @@ -74,9 +74,9 @@ def test_raw_response_create(self, client: Groq, respx_mock: MockRouter) -> None def test_streaming_response_create(self, client: Groq, respx_mock: MockRouter) -> None: respx_mock.post("/openai/v1/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) with client.audio.speech.with_streaming_response.create( - input="input", - model="model", - voice="voice", + input="The quick brown fox jumped over the lazy dog", + model="playai-tts", + voice="Fritz-PlayAI", ) as speech: assert not speech.is_closed assert speech.http_request.headers.get("X-Stainless-Lang") == "python" @@ -96,9 +96,9 @@ class TestAsyncSpeech: async def test_method_create(self, async_client: AsyncGroq, respx_mock: MockRouter) -> None: respx_mock.post("/openai/v1/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"})) speech = await async_client.audio.speech.create( - input="input", - model="model", - voice="voice", + input="The quick brown fox jumped over the lazy dog", + model="playai-tts", + voice="Fritz-PlayAI", ) assert speech.is_closed assert await speech.json() == {"foo": "bar"} @@ -110,9 +110,9 @@ async def test_method_create(self, async_client: 
@@ -96,9 +96,9 @@ class TestAsyncSpeech:
     async def test_method_create(self, async_client: AsyncGroq, respx_mock: MockRouter) -> None:
         respx_mock.post("/openai/v1/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         speech = await async_client.audio.speech.create(
-            input="input",
-            model="model",
-            voice="voice",
+            input="The quick brown fox jumped over the lazy dog",
+            model="playai-tts",
+            voice="Fritz-PlayAI",
         )
         assert speech.is_closed
         assert await speech.json() == {"foo": "bar"}
@@ -110,9 +110,9 @@ async def test_method_create(self, async_client: AsyncGroq, respx_mock: MockRout
     async def test_method_create_with_all_params(self, async_client: AsyncGroq, respx_mock: MockRouter) -> None:
         respx_mock.post("/openai/v1/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         speech = await async_client.audio.speech.create(
-            input="input",
-            model="model",
-            voice="voice",
+            input="The quick brown fox jumped over the lazy dog",
+            model="playai-tts",
+            voice="Fritz-PlayAI",
             response_format="wav",
             speed=1,
         )
@@ -127,9 +127,9 @@ async def test_raw_response_create(self, async_client: AsyncGroq, respx_mock: Mo
         respx_mock.post("/openai/v1/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
 
         speech = await async_client.audio.speech.with_raw_response.create(
-            input="input",
-            model="model",
-            voice="voice",
+            input="The quick brown fox jumped over the lazy dog",
+            model="playai-tts",
+            voice="Fritz-PlayAI",
         )
 
         assert speech.is_closed is True
@@ -142,9 +142,9 @@ async def test_raw_response_create(self, async_client: AsyncGroq, respx_mock: Mo
     async def test_streaming_response_create(self, async_client: AsyncGroq, respx_mock: MockRouter) -> None:
         respx_mock.post("/openai/v1/audio/speech").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
         async with async_client.audio.speech.with_streaming_response.create(
-            input="input",
-            model="model",
-            voice="voice",
+            input="The quick brown fox jumped over the lazy dog",
+            model="playai-tts",
+            voice="Fritz-PlayAI",
         ) as speech:
             assert not speech.is_closed
             assert speech.http_request.headers.get("X-Stainless-Lang") == "python"
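These tests also exercise the two HTTP-level access patterns every resource exposes: `with_raw_response` returns after the body has been read, while `with_streaming_response` defers reading until the context manager body runs. A sketch built from the attributes the assertions above rely on; `iter_bytes()` is an assumption based on the generated streaming-response API, not something this patch shows:

```python
from groq import Groq

client = Groq()

# Raw access: the response object plus HTTP request metadata.
speech = client.audio.speech.with_raw_response.create(
    input="The quick brown fox jumped over the lazy dog",
    model="playai-tts",
    voice="Fritz-PlayAI",
)
assert speech.is_closed  # body already read, exactly as the test asserts

# Streaming access: the body stays open inside the context manager.
with client.audio.speech.with_streaming_response.create(
    input="The quick brown fox jumped over the lazy dog",
    model="playai-tts",
    voice="Fritz-PlayAI",
) as streamed:
    # iter_bytes() is an assumption from the generated streaming API.
    for chunk in streamed.iter_bytes():
        pass  # e.g. feed chunks to a file or audio sink
```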
diff --git a/tests/api_resources/audio/test_transcriptions.py b/tests/api_resources/audio/test_transcriptions.py
index 342d166..b70ec58 100644
--- a/tests/api_resources/audio/test_transcriptions.py
+++ b/tests/api_resources/audio/test_transcriptions.py
@@ -21,7 +21,7 @@ class TestTranscriptions:
     @parametrize
     def test_method_create(self, client: Groq) -> None:
         transcription = client.audio.transcriptions.create(
-            model="whisper-large-v3",
+            model="whisper-large-v3-turbo",
         )
         assert_matches_type(Transcription, transcription, path=["response"])
@@ -29,7 +29,7 @@ def test_method_create(self, client: Groq) -> None:
     @parametrize
     def test_method_create_with_all_params(self, client: Groq) -> None:
         transcription = client.audio.transcriptions.create(
-            model="whisper-large-v3",
+            model="whisper-large-v3-turbo",
             file=b"raw file contents",
             language="string",
             prompt="prompt",
@@ -44,7 +44,7 @@ def test_method_create_with_all_params(self, client: Groq) -> None:
     @parametrize
     def test_raw_response_create(self, client: Groq) -> None:
         response = client.audio.transcriptions.with_raw_response.create(
-            model="whisper-large-v3",
+            model="whisper-large-v3-turbo",
         )
 
         assert response.is_closed is True
@@ -56,7 +56,7 @@ def test_raw_response_create(self, client: Groq) -> None:
     @parametrize
     def test_streaming_response_create(self, client: Groq) -> None:
         with client.audio.transcriptions.with_streaming_response.create(
-            model="whisper-large-v3",
+            model="whisper-large-v3-turbo",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -74,7 +74,7 @@ class TestAsyncTranscriptions:
     @parametrize
     async def test_method_create(self, async_client: AsyncGroq) -> None:
         transcription = await async_client.audio.transcriptions.create(
-            model="whisper-large-v3",
+            model="whisper-large-v3-turbo",
         )
         assert_matches_type(Transcription, transcription, path=["response"])
@@ -82,7 +82,7 @@ async def test_method_create(self, async_client: AsyncGroq) -> None:
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncGroq) -> None:
         transcription = await async_client.audio.transcriptions.create(
-            model="whisper-large-v3",
+            model="whisper-large-v3-turbo",
             file=b"raw file contents",
             language="string",
             prompt="prompt",
@@ -97,7 +97,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGroq) -> N
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncGroq) -> None:
         response = await async_client.audio.transcriptions.with_raw_response.create(
-            model="whisper-large-v3",
+            model="whisper-large-v3-turbo",
         )
 
         assert response.is_closed is True
@@ -109,7 +109,7 @@ async def test_raw_response_create(self, async_client: AsyncGroq) -> None:
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncGroq) -> None:
         async with async_client.audio.transcriptions.with_streaming_response.create(
-            model="whisper-large-v3",
+            model="whisper-large-v3-turbo",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
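A live transcription call against the turbo model, matching the shape of these fixtures; the file name and the optional parameter choices are illustrative:

```python
from groq import Groq

client = Groq()

with open("meeting.m4a", "rb") as audio_file:  # illustrative local file
    transcription = client.audio.transcriptions.create(
        model="whisper-large-v3-turbo",
        file=audio_file,
        language="en",
        response_format="verbose_json",
        temperature=0,
    )

print(transcription.text)
```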
diff --git a/tests/api_resources/audio/test_translations.py b/tests/api_resources/audio/test_translations.py
index 1a6645c..d9858cf 100644
--- a/tests/api_resources/audio/test_translations.py
+++ b/tests/api_resources/audio/test_translations.py
@@ -21,7 +21,7 @@ class TestTranslations:
     @parametrize
     def test_method_create(self, client: Groq) -> None:
         translation = client.audio.translations.create(
-            model="whisper-1",
+            model="whisper-large-v3-turbo",
         )
         assert_matches_type(Translation, translation, path=["response"])
@@ -29,7 +29,7 @@ def test_method_create(self, client: Groq) -> None:
     @parametrize
     def test_method_create_with_all_params(self, client: Groq) -> None:
         translation = client.audio.translations.create(
-            model="whisper-1",
+            model="whisper-large-v3-turbo",
             file=b"raw file contents",
             prompt="prompt",
             response_format="json",
@@ -42,7 +42,7 @@ def test_method_create_with_all_params(self, client: Groq) -> None:
     @parametrize
     def test_raw_response_create(self, client: Groq) -> None:
         response = client.audio.translations.with_raw_response.create(
-            model="whisper-1",
+            model="whisper-large-v3-turbo",
         )
 
         assert response.is_closed is True
@@ -54,7 +54,7 @@ def test_raw_response_create(self, client: Groq) -> None:
     @parametrize
     def test_streaming_response_create(self, client: Groq) -> None:
         with client.audio.translations.with_streaming_response.create(
-            model="whisper-1",
+            model="whisper-large-v3-turbo",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -72,7 +72,7 @@ class TestAsyncTranslations:
     @parametrize
     async def test_method_create(self, async_client: AsyncGroq) -> None:
         translation = await async_client.audio.translations.create(
-            model="whisper-1",
+            model="whisper-large-v3-turbo",
         )
         assert_matches_type(Translation, translation, path=["response"])
@@ -80,7 +80,7 @@ async def test_method_create(self, async_client: AsyncGroq) -> None:
     @parametrize
     async def test_method_create_with_all_params(self, async_client: AsyncGroq) -> None:
         translation = await async_client.audio.translations.create(
-            model="whisper-1",
+            model="whisper-large-v3-turbo",
             file=b"raw file contents",
             prompt="prompt",
             response_format="json",
@@ -93,7 +93,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGroq) -> N
     @parametrize
     async def test_raw_response_create(self, async_client: AsyncGroq) -> None:
         response = await async_client.audio.translations.with_raw_response.create(
-            model="whisper-1",
+            model="whisper-large-v3-turbo",
         )
 
         assert response.is_closed is True
@@ -105,7 +105,7 @@ async def test_raw_response_create(self, async_client: AsyncGroq) -> None:
     @parametrize
     async def test_streaming_response_create(self, async_client: AsyncGroq) -> None:
         async with async_client.audio.translations.with_streaming_response.create(
-            model="whisper-1",
+            model="whisper-large-v3-turbo",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py
index 5b3b03d..d1d422e 100644
--- a/tests/api_resources/chat/test_completions.py
+++ b/tests/api_resources/chat/test_completions.py
@@ -26,7 +26,7 @@ def test_method_create(self, client: Groq) -> None:
                     "role": "system",
                 }
             ],
-            model="string",
+            model="meta-llama/llama-4-scout-17b-16e-instruct",
         )
         assert_matches_type(ChatCompletion, completion, path=["response"])
@@ -40,7 +40,7 @@ def test_method_create_with_all_params(self, client: Groq) -> None:
                     "name": "name",
                 }
             ],
-            model="string",
+            model="meta-llama/llama-4-scout-17b-16e-instruct",
             exclude_domains=["string"],
             frequency_penalty=-2,
             function_call="none",
@@ -99,7 +99,7 @@ def test_raw_response_create(self, client: Groq) -> None:
                     "role": "system",
                 }
             ],
-            model="string",
+            model="meta-llama/llama-4-scout-17b-16e-instruct",
         )
 
         assert response.is_closed is True
@@ -116,7 +116,7 @@ def test_streaming_response_create(self, client: Groq) -> None:
                     "role": "system",
                 }
             ],
-            model="string",
+            model="meta-llama/llama-4-scout-17b-16e-instruct",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -139,7 +139,7 @@ async def test_method_create(self, async_client: AsyncGroq) -> None:
                     "role": "system",
                 }
             ],
-            model="string",
+            model="meta-llama/llama-4-scout-17b-16e-instruct",
         )
         assert_matches_type(ChatCompletion, completion, path=["response"])
@@ -153,7 +153,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGroq) -> N
                     "name": "name",
                 }
             ],
-            model="string",
+            model="meta-llama/llama-4-scout-17b-16e-instruct",
             exclude_domains=["string"],
             frequency_penalty=-2,
             function_call="none",
@@ -212,7 +212,7 @@ async def test_raw_response_create(self, async_client: AsyncGroq) -> None:
                     "role": "system",
                 }
             ],
-            model="string",
+            model="meta-llama/llama-4-scout-17b-16e-instruct",
         )
 
         assert response.is_closed is True
@@ -229,7 +229,7 @@ async def test_streaming_response_create(self, async_client: AsyncGroq) -> None:
                     "role": "system",
                 }
             ],
-            model="string",
+            model="meta-llama/llama-4-scout-17b-16e-instruct",
         ) as response:
             assert not response.is_closed
             assert response.http_request.headers.get("X-Stainless-Lang") == "python"
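Replacing the placeholder `model="string"` with a real model ID throughout makes the fixtures read like live calls. For reference, an equivalent request outside the test harness is sketched below, using the same messages/model shape as the fixtures:

```python
from groq import Groq

client = Groq()

completion = client.chat.completions.create(
    messages=[
        {"role": "system", "content": "You are a concise assistant."},
        {"role": "user", "content": "Explain why fast inference matters."},
    ],
    model="meta-llama/llama-4-scout-17b-16e-instruct",
)
print(completion.choices[0].message.content)
```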
diff --git a/tests/test_client.py b/tests/test_client.py
index 500322e..78d8087 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -797,7 +797,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
                     "role": "system",
                 }
             ],
-            model="string",
+            model="meta-llama/llama-4-scout-17b-16e-instruct",
         )
 
         assert response.retries_taken == failures_before_success
@@ -827,7 +827,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
                     "role": "system",
                 }
             ],
-            model="string",
+            model="meta-llama/llama-4-scout-17b-16e-instruct",
             extra_headers={"x-stainless-retry-count": Omit()},
         )
 
@@ -859,7 +859,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
                     "role": "system",
                 }
             ],
-            model="string",
+            model="meta-llama/llama-4-scout-17b-16e-instruct",
             extra_headers={"x-stainless-retry-count": "42"},
         )
 
@@ -1624,7 +1624,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
                     "role": "system",
                 }
             ],
-            model="string",
+            model="meta-llama/llama-4-scout-17b-16e-instruct",
         )
 
         assert response.retries_taken == failures_before_success
@@ -1657,7 +1657,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
                     "role": "system",
                 }
             ],
-            model="string",
+            model="meta-llama/llama-4-scout-17b-16e-instruct",
             extra_headers={"x-stainless-retry-count": Omit()},
         )
 
@@ -1690,7 +1690,7 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
                     "role": "system",
                 }
             ],
-            model="string",
+            model="meta-llama/llama-4-scout-17b-16e-instruct",
             extra_headers={"x-stainless-retry-count": "42"},
         )

From 7a0f06aca6223dc04e146f1b75445a2a64ce409a Mon Sep 17 00:00:00 2001
From: Graden Rea <grea@groq.com>
Date: Fri, 16 May 2025 12:53:40 -0700
Subject: [PATCH 14/16] chore: fix README example

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 8b7582c..94b51a3 100644
--- a/README.md
+++ b/README.md
@@ -40,7 +40,7 @@ chat_completion = client.chat.completions.create(
     ],
     model="llama3-8b-8192",
 )
-print(chat_completion.id)
+print(chat_completion.choices[0].message.content)
 ```
 
 While you can provide an `api_key` keyword argument,
@@ -72,7 +72,7 @@ async def main() -> None:
         ],
         model="llama3-8b-8192",
     )
-    print(chat_completion.id)
+    print(chat_completion.choices[0].message.content)
 
 
 asyncio.run(main())

From 32a17310c3a481b4b6b7aaa1f742720c367864f5 Mon Sep 17 00:00:00 2001
From: Graden Rea <grea@groq.com>
Date: Fri, 16 May 2025 12:55:01 -0700
Subject: [PATCH 15/16] fix: add search settings to all chat completion overloads

---
 src/groq/resources/chat/completions.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/groq/resources/chat/completions.py b/src/groq/resources/chat/completions.py
index f6f69d6..baaf21e 100644
--- a/src/groq/resources/chat/completions.py
+++ b/src/groq/resources/chat/completions.py
@@ -70,6 +70,7 @@ def create(
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         reasoning_format: Optional[Literal["hidden", "raw", "parsed"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[completion_create_params.ResponseFormat] | NotGiven = NOT_GIVEN,
+        search_settings: Optional[completion_create_params.SearchSettings] | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "on_demand", "flex"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
@@ -111,6 +112,7 @@ def create(
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         reasoning_format: Optional[Literal["hidden", "raw", "parsed"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[completion_create_params.ResponseFormat] | NotGiven = NOT_GIVEN,
+        search_settings: Optional[completion_create_params.SearchSettings] | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "on_demand", "flex"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
@@ -152,6 +154,7 @@ def create(
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         reasoning_format: Optional[Literal["hidden", "raw", "parsed"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[completion_create_params.ResponseFormat] | NotGiven = NOT_GIVEN,
+        search_settings: Optional[completion_create_params.SearchSettings] | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "on_demand", "flex"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
@@ -441,6 +444,7 @@ async def create(
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         reasoning_format: Optional[Literal["hidden", "raw", "parsed"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[completion_create_params.ResponseFormat] | NotGiven = NOT_GIVEN,
+        search_settings: Optional[completion_create_params.SearchSettings] | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "on_demand", "flex"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
@@ -482,6 +486,7 @@ async def create(
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         reasoning_format: Optional[Literal["hidden", "raw", "parsed"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[completion_create_params.ResponseFormat] | NotGiven = NOT_GIVEN,
+        search_settings: Optional[completion_create_params.SearchSettings] | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "on_demand", "flex"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
@@ -523,6 +528,7 @@ async def create(
         presence_penalty: Optional[float] | NotGiven = NOT_GIVEN,
         reasoning_format: Optional[Literal["hidden", "raw", "parsed"]] | NotGiven = NOT_GIVEN,
         response_format: Optional[completion_create_params.ResponseFormat] | NotGiven = NOT_GIVEN,
+        search_settings: Optional[completion_create_params.SearchSettings] | NotGiven = NOT_GIVEN,
         seed: Optional[int] | NotGiven = NOT_GIVEN,
         service_tier: Optional[Literal["auto", "on_demand", "flex"]] | NotGiven = NOT_GIVEN,
         stop: Union[Optional[str], List[str], None] | NotGiven = NOT_GIVEN,
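With this change, `search_settings` is accepted on every sync and async overload rather than only on the implementation, so type checkers no longer reject it. A hedged sketch of passing it as a plain dict (the SDK's TypedDict-style params accept dicts); the key names and the search-capable model ID are assumptions, since the patch shows only the parameter's type:

```python
from groq import Groq

client = Groq()

# The SearchSettings keys below are illustrative guesses, not taken from
# this patch; "compound-beta" is assumed to be a search-capable model.
completion = client.chat.completions.create(
    messages=[{"role": "user", "content": "What changed in groq-python 0.25.0?"}],
    model="compound-beta",
    search_settings={
        "include_domains": ["github.com"],
        "exclude_domains": ["example.com"],
    },
)
print(completion.choices[0].message.content)
```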
From 15c2dffd031128949730a60196fa48de4c7fda9f Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 16 May 2025 19:55:25 +0000
Subject: [PATCH 16/16] release: 0.25.0

---
 .release-please-manifest.json |  2 +-
 CHANGELOG.md                  | 32 ++++++++++++++++++++++++++++++++
 pyproject.toml                |  2 +-
 src/groq/_version.py          |  2 +-
 4 files changed, 35 insertions(+), 3 deletions(-)

diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index d2d60a3..a36746b 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
 {
-  ".": "0.24.0"
+  ".": "0.25.0"
 }
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 44e3945..4bea37c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,37 @@
 # Changelog
 
+## 0.25.0 (2025-05-16)
+
+Full Changelog: [v0.24.0...v0.25.0](https://github.com/groq/groq-python/compare/v0.24.0...v0.25.0)
+
+### Features
+
+* **api:** api update ([385969a](https://github.com/groq/groq-python/commit/385969a478242c67dd07e510632ea1f168f743d8))
+* **api:** api update ([e3acb61](https://github.com/groq/groq-python/commit/e3acb618e09864829089575c51ec617b2f4b6975))
+* **api:** api update ([4bb39e5](https://github.com/groq/groq-python/commit/4bb39e59bbd317de95b3e1d896ab83ee41bc7b3e))
+* **api:** api update ([ce3c251](https://github.com/groq/groq-python/commit/ce3c2514e2e66557de3e583532743cb3806032a2))
+
+
+### Bug Fixes
+
+* add search settings to all chat completion overloads ([32a1731](https://github.com/groq/groq-python/commit/32a17310c3a481b4b6b7aaa1f742720c367864f5))
+* GitHub Terraform: Create/Update .github/workflows/stale.yaml [skip ci] ([a365e26](https://github.com/groq/groq-python/commit/a365e262f988103f5757ffd9054b822a72868586))
+* **package:** support direct resource imports ([3ee8779](https://github.com/groq/groq-python/commit/3ee87792c92196abba0a1c7d9400a34d95c58895))
+
+
+### Chores
+
+* **ci:** fix installation instructions ([454ff60](https://github.com/groq/groq-python/commit/454ff60a12d7e16127d87ef9c152b9e4a394246d))
+* **ci:** upload sdks to package manager ([1f464f8](https://github.com/groq/groq-python/commit/1f464f84bee361e07214e6b65a6727a1b5e0bc7b))
+* fix README example ([7a0f06a](https://github.com/groq/groq-python/commit/7a0f06aca6223dc04e146f1b75445a2a64ce409a))
+* **internal:** avoid errors for isinstance checks on proxies ([7a04964](https://github.com/groq/groq-python/commit/7a04964d964feaac9dfcf1f46dad1f63a5acf714))
+* use lazy imports for resources ([8c6351a](https://github.com/groq/groq-python/commit/8c6351a0215827dea34174978a78d06a528e6eef))
+
+
+### Documentation
+
+* remove or fix invalid readme examples ([4dc027a](https://github.com/groq/groq-python/commit/4dc027a35054db11eec6c8bfe1fd463b0c49db6e))
+
 ## 0.24.0 (2025-05-02)
 
 Full Changelog: [v0.23.1...v0.24.0](https://github.com/groq/groq-python/compare/v0.23.1...v0.24.0)
diff --git a/pyproject.toml b/pyproject.toml
index 8d1ca7b..45df707 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "groq"
-version = "0.24.0"
+version = "0.25.0"
 description = "The official Python library for the groq API"
 dynamic = ["readme"]
 license = "Apache-2.0"
diff --git a/src/groq/_version.py b/src/groq/_version.py
index 28652de..a9df67c 100644
--- a/src/groq/_version.py
+++ b/src/groq/_version.py
@@ -1,4 +1,4 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
 __title__ = "groq"
-__version__ = "0.24.0"  # x-release-please-version
+__version__ = "0.25.0"  # x-release-please-version