# Gist by @intellectronica -- created August 8, 2024
# https://gist.github.com/intellectronica/7646a92da658428e0ba237544d66da7c
from openai import OpenAI, pydantic_function_tool
from pydantic import BaseModel, Field

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

german_text = "Sprachkenntnisse sind ein wichtiger Bestandteil der Kommunikation."
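# The gist calls print_schema and print_result without defining them. The
# bodies below are minimal stand-ins (an assumption, not the author's
# originals): pretty-print the model's JSON schema and a parsed instance,
# matching the output recorded further down.
import json

def print_schema(model: type[BaseModel]) -> None:
    # Pydantic v2: model_json_schema() returns the JSON Schema as a dict.
    print(json.dumps(model.model_json_schema(), indent=2))

def print_result(result: BaseModel) -> None:
    # Works for both routes below: parsed_arguments and .parsed are model instances.
    print(result.model_dump_json(indent=2))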
class TranslatedString(BaseModel):
    input_language: str = Field(
        ...,
        description="The language of the original text, as 2-letter language code."
    )
    translation: str
print("SCHEMA:")
print_schema(TranslatedString)
# Route 1: structured output via a function tool derived from the model.
translation = client.beta.chat.completions.parse(
    model='gpt-4o',
    messages=[
        {"role": "system", "content": "Detect the language of the original text and translate it into English."},
        {"role": "user", "content": german_text},
    ],
    tools=[pydantic_function_tool(TranslatedString)],
    temperature=0.5,
    max_tokens=1000,
).choices[0].message.tool_calls[0].function.parsed_arguments

print("RESULT (tool):")
print_result(translation)
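# Side note: pydantic_function_tool(TranslatedString) wraps the same JSON
# schema printed above in a function-tool definition (assumed shape, roughly
# {"type": "function", "function": {"name": ..., "strict": true,
# "parameters": <schema>}}), so both routes in this gist send an identical
# schema to the API -- only the transport differs.
# print(pydantic_function_tool(TranslatedString))  # uncomment to inspect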
# Route 2: structured output via response_format. This is the call that
# fails below with a 400 (see the note after the traceback).
translation = client.beta.chat.completions.parse(
    model='gpt-4o',
    messages=[
        {"role": "system", "content": "Detect the language of the original text and translate it into English."},
        {"role": "user", "content": german_text},
    ],
    response_format=TranslatedString,
    temperature=0.5,
    max_tokens=1000,
).choices[0].message.parsed

print("RESULT (response_format):")
print_result(translation)
# Output #
#
# SCHEMA:
# {
#   "properties": {
#     "input_language": {
#       "description": "The language of the original text, as 2-letter language code.",
#       "title": "Input Language",
#       "type": "string"
#     },
#     "translation": {
#       "title": "Translation",
#       "type": "string"
#     }
#   },
#   "required": [
#     "input_language",
#     "translation"
#   ],
#   "title": "TranslatedString",
#   "type": "object"
# }
#
# RESULT (tool):
# {
#   "input_language": "de",
#   "translation": "Language skills are an important part of communication."
# }
#
# ---------------------------------------------------------------------------
# BadRequestError                           Traceback (most recent call last)
# Cell In[12], line 27
#      24 print("RESULT (tool):")
#      25 print_result(translation)
# ---> 27 translation = client.beta.chat.completions.parse(
#      28     model='gpt-4o',
#      29     messages=[
#      30         {"role": "system", "content": "Detect the language of the original text and translate it into English."},
#      31         {"role": "user", "content": german_text},
#      32     ],
#      33     response_format=TranslatedString,
#      34     temperature=0.5,
#      35     max_tokens=1000,
#      36 ).choices[0].message.parsed
#      38 print("RESULT (response_format):")
#      39 print_result(translation)
#
# File ~/.python/current/lib/python3.10/site-packages/openai/resources/beta/chat/completions.py:112, in Completions.parse(self, messages, model, response_format, frequency_penalty, function_call, functions, logit_bias, logprobs, max_tokens, n, parallel_tool_calls, presence_penalty, seed, service_tier, stop, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
#     105 _validate_input_tools(tools)
#     107 extra_headers = {
#     108     "X-Stainless-Helper-Method": "beta.chat.completions.parse",
#     109     **(extra_headers or {}),
#     110 }
# --> 112 raw_completion = self._client.chat.completions.create(
#     113     messages=messages,
#     114     model=model,
#     115     response_format=_type_to_response_format(response_format),
#     116     frequency_penalty=frequency_penalty,
#     117     function_call=function_call,
#     118     functions=functions,
#     119     logit_bias=logit_bias,
#     120     logprobs=logprobs,
#     121     max_tokens=max_tokens,
#     122     n=n,
#     123     parallel_tool_calls=parallel_tool_calls,
#     124     presence_penalty=presence_penalty,
#     125     seed=seed,
#     126     service_tier=service_tier,
#     127     stop=stop,
#     128     stream_options=stream_options,
#     129     temperature=temperature,
#     130     tool_choice=tool_choice,
#     131     tools=tools,
#     132     top_logprobs=top_logprobs,
#     133     top_p=top_p,
#     134     user=user,
#     135     extra_headers=extra_headers,
#     136     extra_query=extra_query,
#     137     extra_body=extra_body,
#     138     timeout=timeout,
#     139 )
#     140 return _parse_chat_completion(
#     141     response_format=response_format,
#     142     chat_completion=raw_completion,
#     143     input_tools=tools,
#     144 )
#
# File ~/.python/current/lib/python3.10/site-packages/openai/_utils/_utils.py:274, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs)
#     272     msg = f"Missing required argument: {quote(missing[0])}"
#     273     raise TypeError(msg)
# --> 274 return func(*args, **kwargs)
#
# File ~/.python/current/lib/python3.10/site-packages/openai/resources/chat/completions.py:650, in Completions.create(self, messages, model, frequency_penalty, function_call, functions, logit_bias, logprobs, max_tokens, n, parallel_tool_calls, presence_penalty, response_format, seed, service_tier, stop, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)
#     616 @required_args(["messages", "model"], ["messages", "model", "stream"])
#     617 def create(
#     618     self,
#        (...)
#     648     timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
#     649 ) -> ChatCompletion | Stream[ChatCompletionChunk]:
# --> 650     return self._post(
#     651         "/chat/completions",
#     652         body=maybe_transform(
#     653             {
#     654                 "messages": messages,
#     655                 "model": model,
#     656                 "frequency_penalty": frequency_penalty,
#     657                 "function_call": function_call,
#     658                 "functions": functions,
#     659                 "logit_bias": logit_bias,
#     660                 "logprobs": logprobs,
#     661                 "max_tokens": max_tokens,
#     662                 "n": n,
#     663                 "parallel_tool_calls": parallel_tool_calls,
#     664                 "presence_penalty": presence_penalty,
#     665                 "response_format": response_format,
#     666                 "seed": seed,
#     667                 "service_tier": service_tier,
#     668                 "stop": stop,
#     669                 "stream": stream,
#     670                 "stream_options": stream_options,
#     671                 "temperature": temperature,
#     672                 "tool_choice": tool_choice,
#     673                 "tools": tools,
#     674                 "top_logprobs": top_logprobs,
#     675                 "top_p": top_p,
#     676                 "user": user,
#     677             },
#     678             completion_create_params.CompletionCreateParams,
#     679         ),
#     680         options=make_request_options(
#     681             extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
#     682         ),
#     683         cast_to=ChatCompletion,
#     684         stream=stream or False,
#     685         stream_cls=Stream[ChatCompletionChunk],
#     686     )
#
# File ~/.python/current/lib/python3.10/site-packages/openai/_base_client.py:1259, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
#    1245 def post(
#    1246     self,
#    1247     path: str,
#        (...)
#    1254     stream_cls: type[_StreamT] | None = None,
#    1255 ) -> ResponseT | _StreamT:
#    1256     opts = FinalRequestOptions.construct(
#    1257         method="post", url=path, json_data=body, files=to_httpx_files(files), **options
#    1258     )
# -> 1259     return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))
#
# File ~/.python/current/lib/python3.10/site-packages/openai/_base_client.py:936, in SyncAPIClient.request(self, cast_to, options, remaining_retries, stream, stream_cls)
#     927 def request(
#     928     self,
#     929     cast_to: Type[ResponseT],
#        (...)
#     934     stream_cls: type[_StreamT] | None = None,
#     935 ) -> ResponseT | _StreamT:
# --> 936     return self._request(
#     937         cast_to=cast_to,
#     938         options=options,
#     939         stream=stream,
#     940         stream_cls=stream_cls,
#     941         remaining_retries=remaining_retries,
#     942     )
#
# File ~/.python/current/lib/python3.10/site-packages/openai/_base_client.py:1040, in SyncAPIClient._request(self, cast_to, options, remaining_retries, stream, stream_cls)
#    1037             err.response.read()
#    1039         log.debug("Re-raising status error")
# -> 1040         raise self._make_status_error_from_response(err.response) from None
#    1042     return self._process_response(
#    1043         cast_to=cast_to,
#    1044         options=options,
#        (...)
#    1048         retries_taken=options.get_max_retries(self.max_retries) - retries,
#    1049     )
#
# BadRequestError: Error code: 400 - {'error': {'message': "Invalid parameter: 'response_format' must be one of 'json_object', 'text'.", 'type': 'invalid_request_error', 'param': 'response_format', 'code': None}}
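
# Why route 2 fails: response_format with a JSON schema (Structured Outputs)
# is only accepted by models that support it. When this was run (August
# 2024), the 'gpt-4o' alias still resolved to an earlier snapshot that only
# accepts 'json_object' or 'text' for response_format, hence the 400. A
# likely fix -- an untested sketch, assuming access to the gpt-4o-2024-08-06
# snapshot, the first with Structured Outputs support -- is to pin the model
# explicitly:

translation = client.beta.chat.completions.parse(
    model='gpt-4o-2024-08-06',  # pinned snapshot that accepts json_schema
    messages=[
        {"role": "system", "content": "Detect the language of the original text and translate it into English."},
        {"role": "user", "content": german_text},
    ],
    response_format=TranslatedString,
    temperature=0.5,
    max_tokens=1000,
).choices[0].message.parsed

print("RESULT (response_format, pinned snapshot):")
print_result(translation)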