diff --git a/Documentation~/README.md b/Documentation~/README.md
index 65b54bed..cbaf9342 100644
--- a/Documentation~/README.md
+++ b/Documentation~/README.md
@@ -10,8 +10,6 @@ An OpenAI API account is required.
***All copyrights, trademarks, logos, and assets are the property of their respective owners.***
-> This repository is available to transfer to the OpenAI organization if they so choose to accept it.
-
## Installing
Requires Unity 2021.3 LTS or higher.
@@ -64,17 +62,17 @@ The recommended installation method is though the unity package manager and [Ope
- [List Models](#list-models)
- [Retrieve Models](#retrieve-model)
- [Delete Fine Tuned Model](#delete-fine-tuned-model)
-- [Assistants](#assistants) :warning: :construction:
+- [Assistants](#assistants)
- [List Assistants](#list-assistants)
- [Create Assistant](#create-assistant)
- [Retrieve Assistant](#retrieve-assistant)
- [Modify Assistant](#modify-assistant)
- [Delete Assistant](#delete-assistant)
- - [Assistant Streaming](#assistant-streaming) :warning: :construction:
- - [Threads](#threads) :warning: :construction:
+ - [Assistant Streaming](#assistant-streaming)
+ - [Threads](#threads)
- [Create Thread](#create-thread)
- [Create Thread and Run](#create-thread-and-run)
- - [Streaming](#create-thread-and-run-streaming) :warning: :construction:
+ - [Streaming](#create-thread-and-run-streaming)
- [Retrieve Thread](#retrieve-thread)
- [Modify Thread](#modify-thread)
- [Delete Thread](#delete-thread)
@@ -86,10 +84,11 @@ The recommended installation method is though the unity package manager and [Ope
- [Thread Runs](#thread-runs)
- [List Runs](#list-thread-runs)
- [Create Run](#create-thread-run)
- - [Streaming](#create-thread-run-streaming) :warning: :construction:
+ - [Streaming](#create-thread-run-streaming)
- [Retrieve Run](#retrieve-thread-run)
- [Modify Run](#modify-thread-run)
- [Submit Tool Outputs to Run](#thread-submit-tool-outputs-to-run)
+ - [Structured Outputs](#thread-structured-outputs) :new:
- [List Run Steps](#list-thread-run-steps)
- [Retrieve Run Step](#retrieve-thread-run-step)
- [Cancel Run](#cancel-thread-run)
@@ -114,13 +113,14 @@ The recommended installation method is though the unity package manager and [Ope
- [Streaming](#chat-streaming)
- [Tools](#chat-tools)
- [Vision](#chat-vision)
+ - [Json Schema](#chat-json-schema) :new:
- [Json Mode](#chat-json-mode)
- [Audio](#audio)
- [Create Speech](#create-speech)
- [Stream Speech](#stream-speech)
- [Create Transcription](#create-transcription)
- [Create Translation](#create-translation)
-- [Images](#images) :warning: :construction:
+- [Images](#images)
- [Create Image](#create-image)
- [Edit Image](#edit-image)
- [Create Image Variation](#create-image-variation)
@@ -306,14 +306,15 @@ This setup allows your front end application to securely communicate with your b
#### Back End Example
-In this example, we demonstrate how to set up and use `OpenAIProxyStartup` in a new ASP.NET Core web app. The proxy server will handle authentication and forward requests to the OpenAI API, ensuring that your API keys and other sensitive information remain secure.
+In this example, we demonstrate how to set up and use `OpenAIProxy` in a new ASP.NET Core web app. The proxy server will handle authentication and forward requests to the OpenAI API, ensuring that your API keys and other sensitive information remain secure.
1. Create a new [ASP.NET Core minimal web API](https://learn.microsoft.com/en-us/aspnet/core/tutorials/min-web-api?view=aspnetcore-6.0) project.
2. Add the OpenAI-DotNet nuget package to your project.
- Powershell install: `Install-Package OpenAI-DotNet-Proxy`
+ - Dotnet install: `dotnet add package OpenAI-DotNet-Proxy`
- Manually editing .csproj: ``
3. Create a new class that inherits from `AbstractAuthenticationFilter` and override the `ValidateAuthentication` method. This will implement the `IAuthenticationFilter` that you will use to check user session token against your internal server.
-4. In `Program.cs`, create a new proxy web application by calling `OpenAIProxyStartup.CreateDefaultHost` method, passing your custom `AuthenticationFilter` as a type argument.
+4. In `Program.cs`, create a new proxy web application by calling `OpenAIProxy.CreateWebApplication` method, passing your custom `AuthenticationFilter` as a type argument.
5. Create `OpenAIAuthentication` and `OpenAIClientSettings` as you would normally with your API keys, org id, or Azure settings.
```csharp
@@ -321,16 +322,6 @@ public partial class Program
{
private class AuthenticationFilter : AbstractAuthenticationFilter
{
- public override void ValidateAuthentication(IHeaderDictionary request)
- {
- // You will need to implement your own class to properly test
- // custom issued tokens you've setup for your end users.
- if (!request.Authorization.ToString().Contains(TestUserToken))
- {
- throw new AuthenticationException("User is not authorized");
- }
- }
-
public override async Task ValidateAuthenticationAsync(IHeaderDictionary request)
{
await Task.CompletedTask; // remote resource call
@@ -349,7 +340,7 @@ public partial class Program
var auth = OpenAIAuthentication.LoadFromEnv();
var settings = new OpenAIClientSettings(/* your custom settings if using Azure OpenAI */);
using var openAIClient = new OpenAIClient(auth, settings);
- OpenAIProxyStartup.CreateWebApplication(args, openAIClient).Run();
+ OpenAIProxy.CreateWebApplication(args, openAIClient).Run();
}
}
```
@@ -814,6 +805,87 @@ foreach (var message in messages.Items.OrderBy(response => response.CreatedAt))
}
```
+##### [Thread Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
+
+Structured Outputs is the evolution of JSON mode. While both ensure valid JSON is produced, only Structured Outputs ensures schema adherence.
+
+> [!IMPORTANT]
+>
+> - When using JSON mode, always instruct the model to produce JSON via some message in the conversation, for example via your system message. If you don't include an explicit instruction to generate JSON, the model may generate an unending stream of whitespace and the request may run continually until it reaches the token limit. To help ensure you don't forget, the API will throw an error if the string "JSON" does not appear somewhere in the context.
+> - The JSON in the message the model returns may be partial (i.e. cut off) if `finish_reason` is length, which indicates the generation exceeded max_tokens or the conversation exceeded the token limit. To guard against this, check `finish_reason` before parsing the response.
+
+```csharp
+var mathSchema = new JsonSchema("math_response", @"
+{
+ ""type"": ""object"",
+ ""properties"": {
+ ""steps"": {
+ ""type"": ""array"",
+ ""items"": {
+ ""type"": ""object"",
+ ""properties"": {
+ ""explanation"": {
+ ""type"": ""string""
+ },
+ ""output"": {
+ ""type"": ""string""
+ }
+ },
+ ""required"": [
+ ""explanation"",
+ ""output""
+ ],
+ ""additionalProperties"": false
+ }
+ },
+ ""final_answer"": {
+ ""type"": ""string""
+ }
+ },
+ ""required"": [
+ ""steps"",
+ ""final_answer""
+ ],
+ ""additionalProperties"": false
+}");
+var assistant = await OpenAIClient.AssistantsEndpoint.CreateAssistantAsync(
+ new CreateAssistantRequest(
+ name: "Math Tutor",
+ instructions: "You are a helpful math tutor. Guide the user through the solution step by step.",
+ model: "gpt-4o-2024-08-06",
+ jsonSchema: mathSchema));
+ThreadResponse thread = null;
+
+try
+{
+ var run = await assistant.CreateThreadAndRunAsync("how can I solve 8x + 7 = -23",
+ async @event =>
+ {
+ Debug.Log(@event.ToJsonString());
+ await Task.CompletedTask;
+ });
+ thread = await run.GetThreadAsync();
+ run = await run.WaitForStatusChangeAsync();
+ Debug.Log($"Created thread and run: {run.ThreadId} -> {run.Id} -> {run.CreatedAt}");
+ var messages = await thread.ListMessagesAsync();
+
+ foreach (var response in messages.Items)
+ {
+ Debug.Log($"{response.Role}: {response.PrintContent()}");
+ }
+}
+finally
+{
+ await assistant.DeleteAsync(deleteToolResources: thread == null);
+
+ if (thread != null)
+ {
+ var isDeleted = await thread.DeleteAsync(deleteToolResources: true);
+ Assert.IsTrue(isDeleted);
+ }
+}
+```
+
###### [List Thread Run Steps](https://platform.openai.com/docs/api-reference/runs/listRunSteps)
Returns a list of run steps belonging to a run.
@@ -1061,7 +1133,7 @@ var chatRequest = new ChatRequest(messages);
var response = await api.ChatEndpoint.StreamCompletionAsync(chatRequest, async partialResponse =>
{
Debug.Log(partialResponse.FirstChoice.Delta.ToString());
- await Task.Completed;
+ await Task.CompletedTask;
});
var choice = response.FirstChoice;
Debug.Log($"[{choice.Index}] {choice.Message.Role}: {choice.Message} | Finish Reason: {choice.FinishReason}");
@@ -1179,10 +1251,67 @@ var result = await api.ChatEndpoint.GetCompletionAsync(chatRequest);
Debug.Log($"{result.FirstChoice.Message.Role}: {result.FirstChoice} | Finish Reason: {result.FirstChoice.FinishDetails}");
```
-#### [Chat Json Mode](https://platform.openai.com/docs/guides/text-generation/json-mode)
+#### [Chat Json Schema](https://platform.openai.com/docs/guides/structured-outputs)
-> [!WARNING]
-> Beta Feature. API subject to breaking changes.
+The evolution of [Json Mode](#chat-json-mode). While both ensure valid JSON is produced, only Structured Outputs ensures schema adherence.
+
+> [!IMPORTANT]
+>
+> - When using JSON mode, always instruct the model to produce JSON via some message in the conversation, for example via your system message. If you don't include an explicit instruction to generate JSON, the model may generate an unending stream of whitespace and the request may run continually until it reaches the token limit. To help ensure you don't forget, the API will throw an error if the string "JSON" does not appear somewhere in the context.
+> - The JSON in the message the model returns may be partial (i.e. cut off) if `finish_reason` is length, which indicates the generation exceeded max_tokens or the conversation exceeded the token limit. To guard against this, check `finish_reason` before parsing the response.
+
+```csharp
+var messages = new List<Message>
+{
+ new(Role.System, "You are a helpful math tutor. Guide the user through the solution step by step."),
+ new(Role.User, "how can I solve 8x + 7 = -23")
+};
+
+var mathSchema = new JsonSchema("math_response", @"
+{
+ ""type"": ""object"",
+ ""properties"": {
+ ""steps"": {
+ ""type"": ""array"",
+ ""items"": {
+ ""type"": ""object"",
+ ""properties"": {
+ ""explanation"": {
+ ""type"": ""string""
+ },
+ ""output"": {
+ ""type"": ""string""
+ }
+ },
+ ""required"": [
+ ""explanation"",
+ ""output""
+ ],
+ ""additionalProperties"": false
+ }
+ },
+ ""final_answer"": {
+ ""type"": ""string""
+ }
+ },
+ ""required"": [
+ ""steps"",
+ ""final_answer""
+ ],
+ ""additionalProperties"": false
+}");
+var chatRequest = new ChatRequest(messages, model: new("gpt-4o-2024-08-06"), jsonSchema: mathSchema);
+var response = await OpenAIClient.ChatEndpoint.GetCompletionAsync(chatRequest);
+
+foreach (var choice in response.Choices)
+{
+ Debug.Log($"[{choice.Index}] {choice.Message.Role}: {choice} | Finish Reason: {choice.FinishReason}");
+}
+
+response.GetUsage();
+```
+
+#### [Chat Json Mode](https://platform.openai.com/docs/guides/text-generation/json-mode)
> [!IMPORTANT]
>
diff --git a/Runtime/Assistants/AssistantExtensions.cs b/Runtime/Assistants/AssistantExtensions.cs
index c73211c7..5413ae01 100644
--- a/Runtime/Assistants/AssistantExtensions.cs
+++ b/Runtime/Assistants/AssistantExtensions.cs
@@ -81,7 +81,21 @@ public static async Task CreateThreadAndRunAsync(this AssistantResp
/// Optional, .
/// .
public static async Task CreateThreadAndRunAsync(this AssistantResponse assistant, CreateThreadRequest request = null, Func streamEventHandler = null, CancellationToken cancellationToken = default)
- => await assistant.Client.ThreadsEndpoint.CreateThreadAndRunAsync(new CreateThreadAndRunRequest(assistant.Id, createThreadRequest: request), streamEventHandler, cancellationToken);
+ {
+ var threadRunRequest = new CreateThreadAndRunRequest(
+ assistant.Id,
+ assistant.Model,
+ assistant.Instructions,
+ assistant.Tools,
+ assistant.ToolResources,
+ assistant.Metadata,
+ assistant.Temperature,
+ assistant.TopP,
+ jsonSchema: assistant.ResponseFormatObject?.JsonSchema,
+ responseFormat: assistant.ResponseFormat,
+ createThreadRequest: request);
+ return await assistant.Client.ThreadsEndpoint.CreateThreadAndRunAsync(threadRunRequest, streamEventHandler, cancellationToken);
+ }
#region Tools
diff --git a/Runtime/Assistants/AssistantResponse.cs b/Runtime/Assistants/AssistantResponse.cs
index a086a674..656185bd 100644
--- a/Runtime/Assistants/AssistantResponse.cs
+++ b/Runtime/Assistants/AssistantResponse.cs
@@ -1,7 +1,6 @@
// Licensed under the MIT License. See LICENSE in the project root for license information.
using Newtonsoft.Json;
-using OpenAI.Extensions;
using System;
using System.Collections.Generic;
using UnityEngine.Scripting;
@@ -29,7 +28,7 @@ internal AssistantResponse(
[JsonProperty("metadata")] Dictionary metadata,
[JsonProperty("temperature")] double temperature,
[JsonProperty("top_p")] double topP,
- [JsonProperty("response_format")][JsonConverter(typeof(ResponseFormatConverter))] ChatResponseFormat responseFormat)
+ [JsonProperty("response_format")] ResponseFormatObject responseFormat)
{
Id = id;
Object = @object;
@@ -43,7 +42,7 @@ internal AssistantResponse(
Metadata = metadata;
Temperature = temperature;
TopP = topP;
- ResponseFormat = responseFormat;
+ ResponseFormatObject = responseFormat;
}
///
@@ -173,8 +172,10 @@ internal AssistantResponse(
///
[Preserve]
[JsonProperty("response_format")]
- [JsonConverter(typeof(ResponseFormatConverter))]
- public ChatResponseFormat ResponseFormat { get; }
+ public ResponseFormatObject ResponseFormatObject { get; }
+
+ [JsonIgnore]
+ public ChatResponseFormat ResponseFormat => ResponseFormatObject ?? ChatResponseFormat.Auto;
[Preserve]
public static implicit operator string(AssistantResponse assistant) => assistant?.Id;
diff --git a/Runtime/Assistants/CreateAssistantRequest.cs b/Runtime/Assistants/CreateAssistantRequest.cs
index 94e76888..dc26aae0 100644
--- a/Runtime/Assistants/CreateAssistantRequest.cs
+++ b/Runtime/Assistants/CreateAssistantRequest.cs
@@ -1,7 +1,7 @@
// Licensed under the MIT License. See LICENSE in the project root for license information.
using Newtonsoft.Json;
-using OpenAI.Extensions;
+using Newtonsoft.Json.Schema;
using System;
using System.Collections.Generic;
using System.Linq;
@@ -58,14 +58,18 @@ public sealed class CreateAssistantRequest
/// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
/// We generally recommend altering this or temperature but not both.
///
+ ///
+ /// The to use for structured JSON outputs.
+ ///
+ ///
+ ///
///
/// Specifies the format that the model must output.
/// Setting to enables JSON mode,
/// which guarantees the message the model generates is valid JSON.
- /// Important: When using JSON mode you must still instruct the model to produce JSON yourself via some conversation message,
- /// for example via your system message. If you don't do this, the model may generate an unending stream of
- /// whitespace until the generation reaches the token limit, which may take a lot of time and give the appearance
- /// of a "stuck" request. Also note that the message content may be partial (i.e. cut off) if finish_reason="length",
+ /// Important: When using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.
+ /// Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit,
+ /// resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if finish_reason="length",
/// which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.
///
[Preserve]
@@ -80,6 +84,7 @@ public CreateAssistantRequest(
IReadOnlyDictionary metadata = null,
double? temperature = null,
double? topP = null,
+ JsonSchema jsonSchema = null,
ChatResponseFormat? responseFormat = null)
: this(
string.IsNullOrWhiteSpace(model) ? assistant.Model : model,
@@ -91,6 +96,7 @@ public CreateAssistantRequest(
metadata ?? assistant.Metadata,
temperature ?? assistant.Temperature,
topP ?? assistant.TopP,
+ jsonSchema ?? assistant.ResponseFormatObject?.JsonSchema,
responseFormat ?? assistant.ResponseFormat)
{
}
@@ -153,14 +159,18 @@ public CreateAssistantRequest(
/// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
/// We generally recommend altering this or temperature but not both.
///
+ ///
+ /// The to use for structured JSON outputs.
+ ///
+ ///
+ ///
///
/// Specifies the format that the model must output.
- /// Setting to enables JSON mode,
+ /// Setting to or enables JSON mode,
/// which guarantees the message the model generates is valid JSON.
- /// Important: When using JSON mode you must still instruct the model to produce JSON yourself via some conversation message,
- /// for example via your system message. If you don't do this, the model may generate an unending stream of
- /// whitespace until the generation reaches the token limit, which may take a lot of time and give the appearance
- /// of a "stuck" request. Also note that the message content may be partial (i.e. cut off) if finish_reason="length",
+ /// Important: When using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.
+ /// Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit,
+ /// resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if finish_reason="length",
/// which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.
///
[Preserve]
@@ -174,7 +184,8 @@ public CreateAssistantRequest(
IReadOnlyDictionary metadata = null,
double? temperature = null,
double? topP = null,
- ChatResponseFormat responseFormat = ChatResponseFormat.Auto)
+ JsonSchema jsonSchema = null,
+ ChatResponseFormat responseFormat = ChatResponseFormat.Text)
{
Model = string.IsNullOrWhiteSpace(model) ? Models.Model.GPT4o : model;
Name = name;
@@ -185,7 +196,15 @@ public CreateAssistantRequest(
Metadata = metadata;
Temperature = temperature;
TopP = topP;
- ResponseFormat = responseFormat;
+
+ if (jsonSchema != null)
+ {
+ ResponseFormatObject = jsonSchema;
+ }
+ else
+ {
+ ResponseFormatObject = responseFormat;
+ }
}
///
@@ -271,8 +290,10 @@ public CreateAssistantRequest(
///
[Preserve]
[JsonProperty("response_format", DefaultValueHandling = DefaultValueHandling.Ignore)]
- [JsonConverter(typeof(ResponseFormatConverter))]
- public ChatResponseFormat ResponseFormat { get; }
+ public ResponseFormatObject ResponseFormatObject { get; }
+
+ [JsonIgnore]
+ public ChatResponseFormat ResponseFormat => ResponseFormatObject ?? ChatResponseFormat.Auto;
///
/// Set of 16 key-value pairs that can be attached to an object.
diff --git a/Runtime/Audio/AudioEndpoint.cs b/Runtime/Audio/AudioEndpoint.cs
index d21a0f24..b09cd4e7 100644
--- a/Runtime/Audio/AudioEndpoint.cs
+++ b/Runtime/Audio/AudioEndpoint.cs
@@ -23,6 +23,8 @@ internal AudioEndpoint(OpenAIClient client) : base(client) { }
///
protected override string Root => "audio";
+ protected override bool? IsAzureDeployment => true;
+
private static readonly object mutex = new();
///
diff --git a/Runtime/Authentication/OpenAISettingsInfo.cs b/Runtime/Authentication/OpenAISettingsInfo.cs
index 9d6dfc5d..6b0dd2d4 100644
--- a/Runtime/Authentication/OpenAISettingsInfo.cs
+++ b/Runtime/Authentication/OpenAISettingsInfo.cs
@@ -1,6 +1,7 @@
// Licensed under the MIT License. See LICENSE in the project root for license information.
using System;
+using System.Collections.Generic;
using Utilities.WebRequestRest.Interfaces;
namespace OpenAI
@@ -11,7 +12,7 @@ public sealed class OpenAISettingsInfo : ISettingsInfo
internal const string OpenAIDomain = "api.openai.com";
internal const string DefaultOpenAIApiVersion = "v1";
internal const string AzureOpenAIDomain = "openai.azure.com";
- internal const string DefaultAzureApiVersion = "2022-12-01";
+ internal const string DefaultAzureApiVersion = "2023-05-01";
///
/// Creates a new instance of for use with OpenAI.
@@ -94,8 +95,9 @@ public OpenAISettingsInfo(string resourceName, string deploymentId, string apiVe
ResourceName = resourceName;
DeploymentId = deploymentId;
ApiVersion = apiVersion;
- BaseRequest = $"/openai/deployments/{DeploymentId}/";
- BaseRequestUrlFormat = $"{Https}{ResourceName}.{AzureOpenAIDomain}{BaseRequest}{{0}}?api-version={ApiVersion}";
+ BaseRequest = "/openai/";
+ BaseRequestUrlFormat = $"{Https}{ResourceName}.{AzureOpenAIDomain}{BaseRequest}{{0}}";
+ defaultQueryParameters.Add("api-version", ApiVersion);
UseOAuthAuthentication = useActiveDirectoryAuthentication;
}
@@ -111,6 +113,13 @@ public OpenAISettingsInfo(string resourceName, string deploymentId, string apiVe
internal bool UseOAuthAuthentication { get; }
- public bool IsAzureDeployment => BaseRequestUrlFormat.Contains(AzureOpenAIDomain);
+ [Obsolete("Use IsAzureOpenAI")]
+ public bool IsAzureDeployment => IsAzureOpenAI;
+
+ public bool IsAzureOpenAI => BaseRequestUrlFormat.Contains(AzureOpenAIDomain);
+
+ private readonly Dictionary defaultQueryParameters = new();
+
+ internal IReadOnlyDictionary DefaultQueryParameters => defaultQueryParameters;
}
}
diff --git a/Runtime/Chat/ChatEndpoint.cs b/Runtime/Chat/ChatEndpoint.cs
index 1515460f..57a97346 100644
--- a/Runtime/Chat/ChatEndpoint.cs
+++ b/Runtime/Chat/ChatEndpoint.cs
@@ -21,6 +21,8 @@ internal ChatEndpoint(OpenAIClient client) : base(client) { }
///
protected override string Root => "chat";
+ protected override bool? IsAzureDeployment => true;
+
///
/// Creates a completion for the chat message.
///
diff --git a/Runtime/Chat/ChatRequest.cs b/Runtime/Chat/ChatRequest.cs
index c00771b5..96f904a8 100644
--- a/Runtime/Chat/ChatRequest.cs
+++ b/Runtime/Chat/ChatRequest.cs
@@ -1,7 +1,6 @@
// Licensed under the MIT License. See LICENSE in the project root for license information.
using Newtonsoft.Json;
-using OpenAI.Extensions;
using System;
using System.Collections.Generic;
using System.Linq;
@@ -24,15 +23,17 @@ public ChatRequest(
int? maxTokens = null,
int? number = null,
double? presencePenalty = null,
- ChatResponseFormat responseFormat = ChatResponseFormat.Auto,
+ ChatResponseFormat responseFormat = ChatResponseFormat.Text,
int? seed = null,
string[] stops = null,
double? temperature = null,
double? topP = null,
int? topLogProbs = null,
bool? parallelToolCalls = null,
+ JsonSchema jsonSchema = null,
string user = null)
- : this(messages, model, frequencyPenalty, logitBias, maxTokens, number, presencePenalty, responseFormat, seed, stops, temperature, topP, topLogProbs, parallelToolCalls, user)
+ : this(messages, model, frequencyPenalty, logitBias, maxTokens, number, presencePenalty,
+ responseFormat, seed, stops, temperature, topP, topLogProbs, parallelToolCalls, jsonSchema, user)
{
var toolList = tools?.ToList();
@@ -105,7 +106,7 @@ public ChatRequest(
///
///
/// An object specifying the format that the model must output.
- /// Setting to enables JSON mode,
+ /// Setting to or enables JSON mode,
/// which guarantees the message the model generates is valid JSON.
///
///
@@ -127,6 +128,11 @@ public ChatRequest(
/// An integer between 0 and 5 specifying the number of most likely tokens to return at each token position,
/// each with an associated log probability.
///
+ ///
+ /// The to use for structured JSON outputs.
+ ///
+ ///
+ ///
/// Whether to enable parallel function calling during tool use.
///
/// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
@@ -140,13 +146,14 @@ public ChatRequest(
int? maxTokens = null,
int? number = null,
double? presencePenalty = null,
- ChatResponseFormat responseFormat = ChatResponseFormat.Auto,
+ ChatResponseFormat responseFormat = ChatResponseFormat.Text,
int? seed = null,
string[] stops = null,
double? temperature = null,
double? topP = null,
int? topLogProbs = null,
bool? parallelToolCalls = null,
+ JsonSchema jsonSchema = null,
string user = null)
{
Messages = messages?.ToList();
@@ -162,7 +169,16 @@ public ChatRequest(
MaxTokens = maxTokens;
Number = number;
PresencePenalty = presencePenalty;
- ResponseFormat = responseFormat;
+
+ if (jsonSchema != null)
+ {
+ ResponseFormatObject = jsonSchema;
+ }
+ else
+ {
+ ResponseFormatObject = responseFormat;
+ }
+
Seed = seed;
Stops = stops;
Temperature = temperature;
@@ -260,20 +276,21 @@ public ChatRequest(
///
/// An object specifying the format that the model must output.
- /// Setting to enables JSON mode,
+ /// Setting to or enables JSON mode,
/// which guarantees the message the model generates is valid JSON.
///
///
- /// Important: When using JSON mode you must still instruct the model to produce JSON yourself via some conversation message,
- /// for example via your system message. If you don't do this, the model may generate an unending stream of
- /// whitespace until the generation reaches the token limit, which may take a lot of time and give the appearance
- /// of a "stuck" request. Also note that the message content may be partial (i.e. cut off) if finish_reason="length",
+ /// Important: When using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.
+ /// Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit,
+ /// resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if finish_reason="length",
/// which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.
///
[Preserve]
[JsonProperty("response_format")]
- [JsonConverter(typeof(ResponseFormatConverter))]
- public ChatResponseFormat ResponseFormat { get; }
+ public ResponseFormatObject ResponseFormatObject { get; }
+
+ [JsonIgnore]
+ public ChatResponseFormat ResponseFormat => ResponseFormatObject ?? ChatResponseFormat.Auto;
///
/// This feature is in Beta. If specified, our system will make a best effort to sample deterministically,
diff --git a/Runtime/Common/ChatResponseFormat.cs b/Runtime/Common/ChatResponseFormat.cs
index c9294946..4f4bab79 100644
--- a/Runtime/Common/ChatResponseFormat.cs
+++ b/Runtime/Common/ChatResponseFormat.cs
@@ -10,6 +10,8 @@ public enum ChatResponseFormat
[EnumMember(Value = "text")]
Text,
[EnumMember(Value = "json_object")]
- Json
+ Json,
+ [EnumMember(Value = "json_schema")]
+ JsonSchema
}
}
diff --git a/Runtime/Common/Function.cs b/Runtime/Common/Function.cs
index bc1a8015..fad994d2 100644
--- a/Runtime/Common/Function.cs
+++ b/Runtime/Common/Function.cs
@@ -6,6 +6,7 @@
using System;
using System.Collections.Concurrent;
using System.Collections.Generic;
+using System.Linq;
using System.Reflection;
using System.Text.RegularExpressions;
using System.Threading;
@@ -40,8 +41,14 @@ public Function() { }
///
/// An optional JSON object describing the parameters of the function that the model can generate.
///
+ ///
+ /// Whether to enable strict schema adherence when generating the function call.
+ /// If set to true, the model will follow the exact schema defined in the parameters field.
+ /// Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the function calling guide.
+ ///
+ ///
[Preserve]
- public Function(string name, string description = null, JToken parameters = null)
+ public Function(string name, string description = null, JToken parameters = null, bool? strict = null)
{
if (!Regex.IsMatch(name, NameRegex))
{
@@ -51,6 +58,7 @@ public Function(string name, string description = null, JToken parameters = null
Name = name;
Description = description;
Parameters = parameters;
+ Strict = strict;
}
///
@@ -66,8 +74,14 @@ public Function(string name, string description = null, JToken parameters = null
///
/// An optional JSON describing the parameters of the function that the model can generate.
///
+ ///
+ /// Whether to enable strict schema adherence when generating the function call.
+ /// If set to true, the model will follow the exact schema defined in the parameters field.
+ /// Only a subset of JSON Schema is supported when strict is true. Learn more about Structured Outputs in the function calling guide.
+ ///
+ ///
[Preserve]
- public Function(string name, string description, string parameters)
+ public Function(string name, string description, string parameters, bool? strict = null)
{
if (!Regex.IsMatch(name, NameRegex))
{
@@ -77,17 +91,19 @@ public Function(string name, string description, string parameters)
Name = name;
Description = description;
Parameters = new JObject(parameters);
+ Strict = strict;
}
[Preserve]
- internal Function(string name, JToken arguments)
+ internal Function(string name, JToken arguments, bool? strict = null)
{
Name = name;
Arguments = arguments;
+ Strict = strict;
}
[Preserve]
- private Function(string name, string description, MethodInfo method, object instance = null)
+ private Function(string name, string description, MethodInfo method, object instance = null, bool? strict = null)
{
if (!Regex.IsMatch(name, NameRegex))
{
@@ -104,51 +120,52 @@ private Function(string name, string description, MethodInfo method, object inst
MethodInfo = method;
Parameters = method.GenerateJsonSchema();
Instance = instance;
+ Strict = strict;
functionCache[Name] = this;
}
- internal static Function GetOrCreateFunction(string name, string description, MethodInfo method, object instance = null)
+ internal static Function GetOrCreateFunction(string name, string description, MethodInfo method, object instance = null, bool? strict = null)
=> functionCache.TryGetValue(name, out var function)
? function
- : new Function(name, description, method, instance);
+ : new Function(name, description, method, instance, strict);
#region Func<,> Overloads
- public static Function FromFunc(string name, Func function, string description = null)
- => GetOrCreateFunction(name, description, function.Method, function.Target);
+ public static Function FromFunc(string name, Func function, string description = null, bool? strict = null)
+ => GetOrCreateFunction(name, description, function.Method, function.Target, strict);
- public static Function FromFunc(string name, Func function, string description = null)
- => GetOrCreateFunction(name, description, function.Method, function.Target);
+ public static Function FromFunc(string name, Func function, string description = null, bool? strict = null)
+ => GetOrCreateFunction(name, description, function.Method, function.Target, strict);
- public static Function FromFunc(string name, Func function, string description = null)
- => GetOrCreateFunction(name, description, function.Method, function.Target);
+ public static Function FromFunc(string name, Func function, string description = null, bool? strict = null)
+ => GetOrCreateFunction(name, description, function.Method, function.Target, strict);
- public static Function FromFunc(string name, Func function, string description = null)
- => GetOrCreateFunction(name, description, function.Method, function.Target);
+ public static Function FromFunc(string name, Func function, string description = null, bool? strict = null)
+ => GetOrCreateFunction(name, description, function.Method, function.Target, strict);
- public static Function FromFunc(string name, Func function, string description = null)
- => GetOrCreateFunction(name, description, function.Method, function.Target);
+ public static Function FromFunc(string name, Func function, string description = null, bool? strict = null)
+ => GetOrCreateFunction(name, description, function.Method, function.Target, strict);
- public static Function FromFunc(string name, Func function, string description = null)
- => GetOrCreateFunction(name, description, function.Method, function.Target);
+ public static Function FromFunc(string name, Func function, string description = null, bool? strict = null)
+ => GetOrCreateFunction(name, description, function.Method, function.Target, strict);
- public static Function FromFunc(string name, Func function, string description = null)
- => GetOrCreateFunction(name, description, function.Method, function.Target);
+ public static Function FromFunc(string name, Func function, string description = null, bool? strict = null)
+ => GetOrCreateFunction(name, description, function.Method, function.Target, strict);
- public static Function FromFunc(string name, Func function, string description = null)
- => GetOrCreateFunction(name, description, function.Method, function.Target);
+ public static Function FromFunc(string name, Func function, string description = null, bool? strict = null)
+ => GetOrCreateFunction(name, description, function.Method, function.Target, strict);
- public static Function FromFunc(string name, Func function, string description = null)
- => GetOrCreateFunction(name, description, function.Method, function.Target);
+ public static Function FromFunc(string name, Func function, string description = null, bool? strict = null)
+ => GetOrCreateFunction(name, description, function.Method, function.Target, strict);
- public static Function FromFunc(string name, Func function, string description = null)
- => GetOrCreateFunction(name, description, function.Method, function.Target);
+ public static Function FromFunc(string name, Func function, string description = null, bool? strict = null)
+ => GetOrCreateFunction(name, description, function.Method, function.Target, strict);
- public static Function FromFunc(string name, Func function, string description = null)
- => GetOrCreateFunction(name, description, function.Method, function.Target);
+ public static Function FromFunc(string name, Func function, string description = null, bool? strict = null)
+ => GetOrCreateFunction(name, description, function.Method, function.Target, strict);
- public static Function FromFunc(string name, Func function, string description = null)
- => GetOrCreateFunction(name, description, function.Method, function.Target);
+ public static Function FromFunc(string name, Func function, string description = null, bool? strict = null)
+ => GetOrCreateFunction(name, description, function.Method, function.Target, strict);
#endregion Func<,> Overloads
@@ -220,6 +237,17 @@ public JToken Arguments
internal set => arguments = value;
}
+        /// <summary>
+        /// Whether to enable strict schema adherence when generating the function call.
+        /// If set to true, the model will follow the exact schema defined in the parameters field.
+        /// </summary>
+        /// <remarks>
+        /// Only a subset of JSON Schema is supported when strict is true.
+        /// </remarks>
+ [Preserve]
+ [JsonProperty("strict", DefaultValueHandling = DefaultValueHandling.Ignore)]
+ public bool? Strict { get; private set; }
+
///
/// The instance of the object to invoke the method on.
///
@@ -405,7 +433,7 @@ private static async Task InvokeInternalAsync(Function function, object[]
private (Function function, object[] invokeArgs) ValidateFunctionArguments(CancellationToken cancellationToken = default)
{
- if (Parameters is { HasValues: true } && Arguments == null)
+ if (Parameters?["properties"] is { HasValues: true } && Arguments == null)
{
throw new ArgumentException($"Function {Name} has parameters but no arguments are set.");
}
diff --git a/Runtime/Common/JsonSchema.cs b/Runtime/Common/JsonSchema.cs
new file mode 100644
index 00000000..dd4fd92b
--- /dev/null
+++ b/Runtime/Common/JsonSchema.cs
@@ -0,0 +1,82 @@
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+using Newtonsoft.Json;
+using Newtonsoft.Json.Linq;
+using UnityEngine.Scripting;
+
+namespace OpenAI
+{
+ [Preserve]
+ public sealed class JsonSchema
+ {
+ ///
+ public JsonSchema(string name, string schema, string description = null, bool strict = true)
+ : this(name, JToken.Parse(schema), description, strict) { }
+
+ ///
+ /// Constructor.
+ ///
+ ///
+ /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
+ ///
+ ///
+ /// The schema for the response format, described as a JSON Schema object.
+ ///
+ ///
+ /// A description of what the response format is for, used by the model to determine how to respond in the format.
+ ///
+ ///
+ /// Whether to enable strict schema adherence when generating the output.
+ /// If set to true, the model will always follow the exact schema defined in the schema field.
+ /// Only a subset of JSON Schema is supported when strict is true.
+ ///
+ [Preserve]
+ [JsonConstructor]
+ public JsonSchema(
+ [JsonProperty("name")] string name,
+ [JsonProperty("schema")] JToken schema,
+ [JsonProperty("description")] string description = null,
+ [JsonProperty("strict")] bool strict = true)
+ {
+ Name = name;
+ Description = description;
+ Strict = strict;
+ Schema = schema;
+ }
+
+        /// <summary>
+        /// The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
+        /// </summary>
+ [Preserve]
+ [JsonProperty("name")]
+ public string Name { get; }
+
+        /// <summary>
+        /// A description of what the response format is for, used by the model to determine how to respond in the format.
+        /// </summary>
+ [Preserve]
+ [JsonProperty("description")]
+ public string Description { get; }
+
+        /// <summary>
+        /// Whether to enable strict schema adherence when generating the output.
+        /// If set to true, the model will always follow the exact schema defined in the schema field.
+        /// </summary>
+        /// <remarks>
+        /// Only a subset of JSON Schema is supported when strict is true.
+        /// </remarks>
+ [Preserve]
+ [JsonProperty("strict")]
+ public bool Strict { get; }
+
+        /// <summary>
+        /// The schema for the response format, described as a JSON Schema object.
+        /// </summary>
+ [Preserve]
+ [JsonProperty("schema")]
+ public JToken Schema { get; }
+
+ [Preserve]
+ public static implicit operator ResponseFormatObject(JsonSchema jsonSchema) => new(jsonSchema);
+ }
+}
diff --git a/Runtime/Extensions/ResponseFormatConverter.cs.meta b/Runtime/Common/JsonSchema.cs.meta
similarity index 86%
rename from Runtime/Extensions/ResponseFormatConverter.cs.meta
rename to Runtime/Common/JsonSchema.cs.meta
index 694cea33..49f161bf 100644
--- a/Runtime/Extensions/ResponseFormatConverter.cs.meta
+++ b/Runtime/Common/JsonSchema.cs.meta
@@ -1,5 +1,5 @@
fileFormatVersion: 2
-guid: 2a452bd27a99e274ca2c88fd23c90840
+guid: 12986bfd513b9f840a2b9d1d06ed5ac6
MonoImporter:
externalObjects: {}
serializedVersion: 2
diff --git a/Runtime/Common/OpenAIBaseEndpoint.cs b/Runtime/Common/OpenAIBaseEndpoint.cs
index 5c4504b7..e6666a05 100644
--- a/Runtime/Common/OpenAIBaseEndpoint.cs
+++ b/Runtime/Common/OpenAIBaseEndpoint.cs
@@ -1,5 +1,7 @@
// Licensed under the MIT License. See LICENSE in the project root for license information.
+using System.Collections.Generic;
+using System.Linq;
using Utilities.WebRequestRest;
namespace OpenAI
@@ -7,5 +9,44 @@ namespace OpenAI
public abstract class OpenAIBaseEndpoint : BaseEndPoint
{
protected OpenAIBaseEndpoint(OpenAIClient client) : base(client) { }
+
+        /// <summary>
+        /// Indicates if the endpoint has an Azure Deployment.
+        /// </summary>
+        /// <remarks>
+        /// If the endpoint is an Azure deployment, <see langword="true"/>.
+        /// If it is not an Azure deployment, <see langword="false"/>.
+        /// If it is not an Azure supported Endpoint, <see langword="null"/>.
+        /// </remarks>
+ protected virtual bool? IsAzureDeployment => null;
+
+ protected override string GetUrl(string endpoint = "", Dictionary queryParameters = null)
+ {
+ string route;
+
+ if (client.Settings.Info.IsAzureOpenAI && IsAzureDeployment == true)
+ {
+ route = $"{Root}deployments/{client.Settings.Info.DeploymentId}/{endpoint}";
+ }
+ else
+ {
+ route = $"{Root}{endpoint}";
+ }
+
+ var result = string.Format(client.Settings.Info.BaseRequestUrlFormat, route);
+
+ foreach (var defaultQueryParameter in client.Settings.Info.DefaultQueryParameters)
+ {
+ queryParameters ??= new Dictionary();
+ queryParameters.Add(defaultQueryParameter.Key, defaultQueryParameter.Value);
+ }
+
+ if (queryParameters is { Count: not 0 })
+ {
+ result += $"?{string.Join('&', queryParameters.Select(parameter => $"{parameter.Key}={parameter.Value}"))}";
+ }
+
+ return result;
+ }
}
}
diff --git a/Runtime/Common/ResponseFormatObject.cs b/Runtime/Common/ResponseFormatObject.cs
index e69de29b..6479a778 100644
--- a/Runtime/Common/ResponseFormatObject.cs
+++ b/Runtime/Common/ResponseFormatObject.cs
@@ -0,0 +1,54 @@
+// Licensed under the MIT License. See LICENSE in the project root for license information.
+
+using Newtonsoft.Json;
+using UnityEngine.Scripting;
+
+namespace OpenAI
+{
+ [Preserve]
+ public sealed class ResponseFormatObject
+ {
+ [Preserve]
+ public ResponseFormatObject() => Type = ChatResponseFormat.Text;
+
+ [Preserve]
+ public ResponseFormatObject(ChatResponseFormat type)
+ {
+ if (type == ChatResponseFormat.JsonSchema)
+ {
+ throw new System.ArgumentException("Use the constructor overload that accepts a JsonSchema object for ChatResponseFormat.JsonSchema.", nameof(type));
+ }
+ Type = type;
+ }
+
+ [Preserve]
+ public ResponseFormatObject(JsonSchema schema)
+ {
+ Type = ChatResponseFormat.JsonSchema;
+ JsonSchema = schema;
+ }
+
+ [Preserve]
+ [JsonConstructor]
+ internal ResponseFormatObject(
+ [JsonProperty("type")] ChatResponseFormat type,
+ [JsonProperty("json_schema")] JsonSchema schema)
+ {
+ Type = type;
+ JsonSchema = schema;
+ }
+
+ [Preserve]
+ [JsonProperty("type", DefaultValueHandling = DefaultValueHandling.Include)]
+ public ChatResponseFormat Type { get; private set; }
+
+ [Preserve]
+ [JsonProperty("json_schema", DefaultValueHandling = DefaultValueHandling.Ignore)]
+ public JsonSchema JsonSchema { get; private set; }
+
+ public static implicit operator ResponseFormatObject(ChatResponseFormat type) => new(type);
+
+ [Preserve]
+ public static implicit operator ChatResponseFormat(ResponseFormatObject format) => format.Type;
+ }
+}
diff --git a/Runtime/Common/Tool.cs b/Runtime/Common/Tool.cs
index 2eef9050..4f6b6941 100644
--- a/Runtime/Common/Tool.cs
+++ b/Runtime/Common/Tool.cs
@@ -241,7 +241,7 @@ where method.IsStatic
where functionAttribute != null
let name = GetFunctionName(type, method)
let description = functionAttribute.Description
- select Function.GetOrCreateFunction(name, description, method)
+ select Function.GetOrCreateFunction(name, description, method, strict: true)
into function
select new Tool(function));
@@ -379,7 +379,7 @@ private static Tool GetOrCreateToolInternal(Type type, MethodInfo method, string
return tool;
}
- tool = new Tool(Function.GetOrCreateFunction(functionName, description ?? string.Empty, method, instance));
+ tool = new Tool(Function.GetOrCreateFunction(functionName, description, method, instance, strict: true));
toolCache.Add(tool);
return tool;
}
diff --git a/Runtime/Embeddings/EmbeddingsEndpoint.cs b/Runtime/Embeddings/EmbeddingsEndpoint.cs
index 6ff3fc51..479de1b8 100644
--- a/Runtime/Embeddings/EmbeddingsEndpoint.cs
+++ b/Runtime/Embeddings/EmbeddingsEndpoint.cs
@@ -20,6 +20,8 @@ internal EmbeddingsEndpoint(OpenAIClient client) : base(client) { }
///
protected override string Root => "embeddings";
+ protected override bool? IsAzureDeployment => true;
+
///
/// Creates an embedding vector representing the input text.
///
diff --git a/Runtime/Extensions/ResponseFormatConverter.cs b/Runtime/Extensions/ResponseFormatConverter.cs
deleted file mode 100644
index 1ff40525..00000000
--- a/Runtime/Extensions/ResponseFormatConverter.cs
+++ /dev/null
@@ -1,82 +0,0 @@
-// Licensed under the MIT License. See LICENSE in the project root for license information.
-
-using Newtonsoft.Json;
-using System;
-using UnityEngine.Scripting;
-
-namespace OpenAI.Extensions
-{
- [Preserve]
- internal sealed class ResponseFormatConverter : JsonConverter
- {
- [Preserve]
- private sealed class ResponseFormatObject
- {
- [Preserve]
- public ResponseFormatObject() => Type = ChatResponseFormat.Text;
-
- [Preserve]
- [JsonConstructor]
- public ResponseFormatObject([JsonProperty("type")] ChatResponseFormat type) => Type = type;
-
- [Preserve]
- [JsonProperty("type", DefaultValueHandling = DefaultValueHandling.Include)]
- public ChatResponseFormat Type { get; private set; }
-
- [Preserve]
- public static implicit operator ResponseFormatObject(ChatResponseFormat type) => new(type);
-
- [Preserve]
- public static implicit operator ChatResponseFormat(ResponseFormatObject format) => format.Type;
- }
-
- [Preserve]
- public override ChatResponseFormat ReadJson(JsonReader reader, Type objectType, ChatResponseFormat existingValue, bool hasExistingValue, JsonSerializer serializer)
- {
- try
- {
- if (reader.TokenType is JsonToken.Null or JsonToken.String)
- {
- return ChatResponseFormat.Auto;
- }
-
- return serializer.Deserialize(reader);
- }
- catch (Exception e)
- {
- throw new JsonSerializationException($"Error reading {nameof(ChatResponseFormat)} from JSON", e);
-
- }
- }
-
- [Preserve]
- public override void WriteJson(JsonWriter writer, ChatResponseFormat value, JsonSerializer serializer)
- {
- const string type = nameof(type);
- const string text = nameof(text);
- // ReSharper disable once InconsistentNaming
- const string json_object = nameof(json_object);
-
- switch (value)
- {
- case ChatResponseFormat.Auto:
- writer.WriteNull();
- break;
- case ChatResponseFormat.Text:
- writer.WriteStartObject();
- writer.WritePropertyName(type);
- writer.WriteValue(text);
- writer.WriteEndObject();
- break;
- case ChatResponseFormat.Json:
- writer.WriteStartObject();
- writer.WritePropertyName(type);
- writer.WriteValue(json_object);
- writer.WriteEndObject();
- break;
- default:
- throw new ArgumentOutOfRangeException(nameof(value), value, null);
- }
- }
- }
-}
diff --git a/Runtime/Extensions/TypeExtensions.cs b/Runtime/Extensions/TypeExtensions.cs
index 82ef1bdd..4a410b78 100644
--- a/Runtime/Extensions/TypeExtensions.cs
+++ b/Runtime/Extensions/TypeExtensions.cs
@@ -11,15 +11,10 @@ namespace OpenAI.Extensions
{
internal static class TypeExtensions
{
- public static JObject GenerateJsonSchema(this MethodInfo methodInfo)
+ public static JObject GenerateJsonSchema(this MethodInfo methodInfo, JsonSerializer serializer = null)
{
var parameters = methodInfo.GetParameters();
- if (parameters.Length == 0)
- {
- return null;
- }
-
var schema = new JObject
{
["type"] = "object",
@@ -29,10 +24,7 @@ public static JObject GenerateJsonSchema(this MethodInfo methodInfo)
foreach (var parameter in parameters)
{
- if (parameter.ParameterType == typeof(CancellationToken))
- {
- continue;
- }
+ if (parameter.ParameterType == typeof(CancellationToken)) { continue; }
if (string.IsNullOrWhiteSpace(parameter.Name))
{
@@ -44,7 +36,7 @@ public static JObject GenerateJsonSchema(this MethodInfo methodInfo)
requiredParameters.Add(parameter.Name);
}
- schema["properties"]![parameter.Name] = GenerateJsonSchema(parameter.ParameterType, schema);
+ schema["properties"]![parameter.Name] = GenerateJsonSchema(parameter.ParameterType, schema, serializer);
var functionParameterAttribute = parameter.GetCustomAttribute();
@@ -59,11 +51,13 @@ public static JObject GenerateJsonSchema(this MethodInfo methodInfo)
schema["required"] = requiredParameters;
}
+ schema["additionalProperties"] = false;
return schema;
}
- public static JObject GenerateJsonSchema(this Type type, JObject rootSchema)
+ public static JObject GenerateJsonSchema(this Type type, JObject rootSchema, JsonSerializer serializer = null)
{
+ serializer ??= OpenAIClient.JsonSerializer;
var schema = new JObject();
if (!type.IsPrimitive &&
@@ -118,7 +112,7 @@ public static JObject GenerateJsonSchema(this Type type, JObject rootSchema)
foreach (var value in Enum.GetValues(type))
{
- ((JArray)schema["enum"]).Add(JToken.FromObject(value, OpenAIClient.JsonSerializer));
+ ((JArray)schema["enum"]).Add(JToken.FromObject(value, serializer));
}
}
else if (type.IsArray || (type.IsGenericType && type.GetGenericTypeDefinition() == typeof(List<>)))
@@ -127,7 +121,7 @@ public static JObject GenerateJsonSchema(this Type type, JObject rootSchema)
var elementType = type.GetElementType() ?? type.GetGenericArguments()[0];
if (rootSchema["definitions"] != null &&
- ((JObject)rootSchema["definitions"]).ContainsKey(elementType.FullName))
+ ((JObject)rootSchema["definitions"]).ContainsKey(elementType.FullName!))
{
schema["items"] = new JObject { ["$ref"] = $"#/definitions/{elementType.FullName}" };
}
@@ -140,7 +134,7 @@ public static JObject GenerateJsonSchema(this Type type, JObject rootSchema)
{
schema["type"] = "object";
rootSchema["definitions"] ??= new JObject();
- rootSchema["definitions"][type.FullName] = new JObject();
+ rootSchema["definitions"][type.FullName!] = new JObject();
var properties = type.GetProperties(BindingFlags.Public | BindingFlags.Instance | BindingFlags.DeclaredOnly);
var fields = type.GetFields(BindingFlags.Public | BindingFlags.Instance | BindingFlags.DeclaredOnly);
@@ -187,7 +181,7 @@ public static JObject GenerateJsonSchema(this Type type, JObject rootSchema)
if (functionPropertyAttribute.DefaultValue != null)
{
- defaultValue = JToken.FromObject(functionPropertyAttribute.DefaultValue, OpenAIClient.JsonSerializer);
+ defaultValue = JToken.FromObject(functionPropertyAttribute.DefaultValue, serializer);
propertyInfo["default"] = defaultValue;
}
@@ -197,7 +191,7 @@ public static JObject GenerateJsonSchema(this Type type, JObject rootSchema)
foreach (var value in functionPropertyAttribute.PossibleValues)
{
- var @enum = JToken.FromObject(value, OpenAIClient.JsonSerializer);
+ var @enum = JToken.FromObject(value, serializer);
if (defaultValue == null)
{
@@ -253,6 +247,7 @@ public static JObject GenerateJsonSchema(this Type type, JObject rootSchema)
schema["required"] = requiredMembers;
}
+ schema["additionalProperties"] = false;
rootSchema["definitions"] ??= new JObject();
rootSchema["definitions"][type.FullName] = schema;
return new JObject { ["$ref"] = $"#/definitions/{type.FullName}" };
diff --git a/Runtime/Images/ImagesEndpoint.cs b/Runtime/Images/ImagesEndpoint.cs
index 14d4911e..693af1a8 100644
--- a/Runtime/Images/ImagesEndpoint.cs
+++ b/Runtime/Images/ImagesEndpoint.cs
@@ -26,6 +26,8 @@ internal ImagesEndpoint(OpenAIClient client) : base(client) { }
///
protected override string Root => "images";
+ protected override bool? IsAzureDeployment => true;
+
///
/// Creates an image given a prompt.
///
diff --git a/Runtime/Threads/CreateRunRequest.cs b/Runtime/Threads/CreateRunRequest.cs
index 662ef3db..97345f23 100644
--- a/Runtime/Threads/CreateRunRequest.cs
+++ b/Runtime/Threads/CreateRunRequest.cs
@@ -1,7 +1,6 @@
// Licensed under the MIT License. See LICENSE in the project root for license information.
using Newtonsoft.Json;
-using OpenAI.Extensions;
using System;
using System.Collections.Generic;
using System.Linq;
@@ -34,13 +33,13 @@ public CreateRunRequest(string assistantId, CreateRunRequest request)
request?.Metadata,
request?.Temperature,
request?.TopP,
- request?.Stream ?? false,
request?.MaxPromptTokens,
request?.MaxCompletionTokens,
request?.TruncationStrategy,
request?.ToolChoice as string ?? ((Tool)request?.ToolChoice)?.Function?.Name,
request?.ParallelToolCalls,
- request?.ResponseFormat ?? ChatResponseFormat.Auto)
+ request?.ResponseFormatObject?.JsonSchema,
+ request?.ResponseFormatObject ?? ChatResponseFormat.Text)
{
}
@@ -82,10 +81,6 @@ public CreateRunRequest(string assistantId, CreateRunRequest request)
/// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
/// We generally recommend altering this or temperature but not both.
///
- ///
- /// If true, returns a stream of events that happen during the Run as server-sent events,
- /// terminating when the Run enters a terminal state with a 'data: [DONE]' message.
- ///
///
/// The maximum number of prompt tokens that may be used over the course of the run.
/// The run will make a best effort to use only the number of prompt tokens specified,
@@ -113,14 +108,18 @@ public CreateRunRequest(string assistantId, CreateRunRequest request)
///
/// Whether to enable parallel function calling during tool use.
///
+ ///
+        /// The <see cref="JsonSchema"/> to use for structured JSON outputs.
+ ///
+ ///
+ ///
///
/// An object specifying the format that the model must output.
- /// Setting to enables JSON mode,
+        /// Setting to <see cref="ChatResponseFormat.Json"/> or <see cref="ChatResponseFormat.JsonSchema"/> enables JSON mode,
/// which guarantees the message the model generates is valid JSON.
- /// Important: When using JSON mode you must still instruct the model to produce JSON yourself via some conversation message,
- /// for example via your system message. If you don't do this, the model may generate an unending stream of
- /// whitespace until the generation reaches the token limit, which may take a lot of time and give the appearance
- /// of a "stuck" request. Also note that the message content may be partial (i.e. cut off) if finish_reason="length",
+ /// Important: When using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.
+ /// Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit,
+ /// resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if finish_reason="length",
/// which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.
///
[Preserve]
@@ -134,13 +133,13 @@ public CreateRunRequest(
IReadOnlyDictionary metadata = null,
double? temperature = null,
double? topP = null,
- bool stream = false,
int? maxPromptTokens = null,
int? maxCompletionTokens = null,
TruncationStrategy truncationStrategy = null,
string toolChoice = null,
bool? parallelToolCalls = null,
- ChatResponseFormat responseFormat = ChatResponseFormat.Auto)
+ JsonSchema jsonSchema = null,
+ ChatResponseFormat responseFormat = ChatResponseFormat.Text)
{
AssistantId = assistantId;
Model = model;
@@ -177,12 +176,19 @@ public CreateRunRequest(
Metadata = metadata;
Temperature = temperature;
TopP = topP;
- Stream = stream;
MaxPromptTokens = maxPromptTokens;
MaxCompletionTokens = maxCompletionTokens;
TruncationStrategy = truncationStrategy;
ParallelToolCalls = parallelToolCalls;
- ResponseFormat = responseFormat;
+
+ if (jsonSchema != null)
+ {
+ ResponseFormatObject = jsonSchema;
+ }
+ else
+ {
+ ResponseFormatObject = responseFormat;
+ }
}
///
@@ -313,7 +319,7 @@ public CreateRunRequest(
///
/// An object specifying the format that the model must output.
- /// Setting to enables JSON mode,
+        /// Setting to <see cref="ChatResponseFormat.Json"/> or <see cref="ChatResponseFormat.JsonSchema"/> enables JSON mode,
/// which guarantees the message the model generates is valid JSON.
///
///
@@ -325,7 +331,9 @@ public CreateRunRequest(
///
[Preserve]
[JsonProperty("response_format")]
- [JsonConverter(typeof(ResponseFormatConverter))]
- public ChatResponseFormat ResponseFormat { get; }
+ public ResponseFormatObject ResponseFormatObject { get; }
+
+ [JsonIgnore]
+ public ChatResponseFormat ResponseFormat => ResponseFormatObject ?? ChatResponseFormat.Auto;
}
}
diff --git a/Runtime/Threads/CreateThreadAndRunRequest.cs b/Runtime/Threads/CreateThreadAndRunRequest.cs
index 36d0072e..eac347fc 100644
--- a/Runtime/Threads/CreateThreadAndRunRequest.cs
+++ b/Runtime/Threads/CreateThreadAndRunRequest.cs
@@ -1,7 +1,6 @@
// Licensed under the MIT License. See LICENSE in the project root for license information.
using Newtonsoft.Json;
-using OpenAI.Extensions;
using System;
using System.Collections.Generic;
using System.Linq;
@@ -30,13 +29,13 @@ public CreateThreadAndRunRequest(string assistantId, CreateThreadAndRunRequest r
request?.Metadata,
request?.Temperature,
request?.TopP,
- request?.Stream ?? false,
request?.MaxPromptTokens,
request?.MaxCompletionTokens,
request?.TruncationStrategy,
request?.ToolChoice as string ?? ((Tool)request?.ToolChoice)?.Function?.Name,
request?.ParallelToolCalls,
- request?.ResponseFormat ?? ChatResponseFormat.Auto)
+ request?.ResponseFormatObject?.JsonSchema,
+ request?.ResponseFormat ?? ChatResponseFormat.Text)
{
}
@@ -81,10 +80,6 @@ public CreateThreadAndRunRequest(string assistantId, CreateThreadAndRunRequest r
/// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
/// We generally recommend altering this or temperature but not both.
///
- ///
- /// If true, returns a stream of events that happen during the Run as server-sent events,
- /// terminating when the Run enters a terminal state with a 'data: [DONE]' message.
- ///
///
/// The maximum number of prompt tokens that may be used over the course of the run.
/// The run will make a best effort to use only the number of prompt tokens specified,
@@ -112,14 +107,18 @@ public CreateThreadAndRunRequest(string assistantId, CreateThreadAndRunRequest r
///
/// Whether to enable parallel function calling during tool use.
///
+ ///
+        /// The <see cref="JsonSchema"/> to use for structured JSON outputs.
+ ///
+ ///
+ ///
///
/// An object specifying the format that the model must output.
- /// Setting to enables JSON mode,
+        /// Setting to <see cref="ChatResponseFormat.Json"/> or <see cref="ChatResponseFormat.JsonSchema"/> enables JSON mode,
/// which guarantees the message the model generates is valid JSON.
- /// Important: When using JSON mode you must still instruct the model to produce JSON yourself via some conversation message,
- /// for example via your system message. If you don't do this, the model may generate an unending stream of
- /// whitespace until the generation reaches the token limit, which may take a lot of time and give the appearance
- /// of a "stuck" request. Also note that the message content may be partial (i.e. cut off) if finish_reason="length",
+ /// Important: When using JSON mode, you must also instruct the model to produce JSON yourself via a system or user message.
+ /// Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit,
+ /// resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if finish_reason="length",
/// which indicates the generation exceeded max_tokens or the conversation exceeded the max context length.
///
///
@@ -135,13 +134,13 @@ public CreateThreadAndRunRequest(
IReadOnlyDictionary metadata = null,
double? temperature = null,
double? topP = null,
- bool stream = false,
int? maxPromptTokens = null,
int? maxCompletionTokens = null,
TruncationStrategy truncationStrategy = null,
string toolChoice = null,
bool? parallelToolCalls = null,
- ChatResponseFormat responseFormat = ChatResponseFormat.Auto,
+ JsonSchema jsonSchema = null,
+ ChatResponseFormat responseFormat = ChatResponseFormat.Text,
CreateThreadRequest createThreadRequest = null)
{
AssistantId = assistantId;
@@ -178,12 +177,20 @@ public CreateThreadAndRunRequest(
Metadata = metadata;
Temperature = temperature;
TopP = topP;
- Stream = stream;
MaxPromptTokens = maxPromptTokens;
MaxCompletionTokens = maxCompletionTokens;
TruncationStrategy = truncationStrategy;
ParallelToolCalls = parallelToolCalls;
- ResponseFormat = responseFormat;
+
+ if (jsonSchema != null)
+ {
+ ResponseFormatObject = jsonSchema;
+ }
+ else
+ {
+ ResponseFormatObject = responseFormat;
+ }
+
ThreadRequest = createThreadRequest;
}
@@ -313,7 +320,7 @@ public CreateThreadAndRunRequest(
///
/// An object specifying the format that the model must output.
- /// Setting to enables JSON mode,
+        /// Setting to <see cref="ChatResponseFormat.Json"/> or <see cref="ChatResponseFormat.JsonSchema"/> enables JSON mode,
/// which guarantees the message the model generates is valid JSON.
///
///
@@ -325,8 +332,10 @@ public CreateThreadAndRunRequest(
///
[Preserve]
[JsonProperty("response_format")]
- [JsonConverter(typeof(ResponseFormatConverter))]
- public ChatResponseFormat ResponseFormat { get; }
+ public ResponseFormatObject ResponseFormatObject { get; }
+
+ [JsonIgnore]
+ public ChatResponseFormat ResponseFormat => ResponseFormatObject ?? ChatResponseFormat.Auto;
///
/// The optional options to use.
diff --git a/Runtime/Threads/MessageResponse.cs b/Runtime/Threads/MessageResponse.cs
index a88f27b9..817db169 100644
--- a/Runtime/Threads/MessageResponse.cs
+++ b/Runtime/Threads/MessageResponse.cs
@@ -213,7 +213,7 @@ internal void AppendFrom(MessageResponse other)
{
if (other == null) { return; }
- if (!string.IsNullOrWhiteSpace(Id))
+ if (!string.IsNullOrWhiteSpace(Id) && !string.IsNullOrWhiteSpace(other.Id))
{
if (Id != other.Id)
{
diff --git a/Runtime/Threads/RunResponse.cs b/Runtime/Threads/RunResponse.cs
index a1b0aadb..9241a62b 100644
--- a/Runtime/Threads/RunResponse.cs
+++ b/Runtime/Threads/RunResponse.cs
@@ -49,7 +49,7 @@ internal RunResponse(
[JsonProperty("truncation_strategy")] TruncationStrategy truncationStrategy,
[JsonProperty("tool_choice")] object toolChoice,
[JsonProperty("parallel_tool_calls")] bool parallelToolCalls,
- [JsonProperty("response_format")][JsonConverter(typeof(ResponseFormatConverter))] ChatResponseFormat responseFormat)
+ [JsonProperty("response_format")] ResponseFormatObject responseFormat)
{
Id = id;
Object = @object;
@@ -77,7 +77,7 @@ internal RunResponse(
TruncationStrategy = truncationStrategy;
ToolChoice = toolChoice;
ParallelToolCalls = parallelToolCalls;
- ResponseFormat = responseFormat;
+ ResponseFormatObject = responseFormat;
}
///
@@ -315,7 +315,7 @@ public DateTime? CompletedAt
///
/// Specifies the format that the model must output.
- /// Setting to enables JSON mode,
+        /// Setting to <see cref="ChatResponseFormat.Json"/> or <see cref="ChatResponseFormat.JsonSchema"/> enables JSON mode,
/// which guarantees the message the model generates is valid JSON.
///
///
@@ -327,8 +327,10 @@ public DateTime? CompletedAt
///
[Preserve]
[JsonProperty("response_format", DefaultValueHandling = DefaultValueHandling.Ignore)]
- [JsonConverter(typeof(ResponseFormatConverter))]
- public ChatResponseFormat ResponseFormat { get; private set; }
+ public ResponseFormatObject ResponseFormatObject { get; private set; }
+
+ [JsonIgnore]
+ public ChatResponseFormat ResponseFormat => ResponseFormatObject ?? ChatResponseFormat.Auto;
[Preserve]
public static implicit operator string(RunResponse run) => run?.ToString();
@@ -340,7 +342,7 @@ internal void AppendFrom(RunResponse other)
{
if (other is null) { return; }
- if (!string.IsNullOrWhiteSpace(Id))
+ if (!string.IsNullOrWhiteSpace(Id) && !string.IsNullOrWhiteSpace(other.Id))
{
if (Id != other.Id)
{
@@ -446,7 +448,11 @@ internal void AppendFrom(RunResponse other)
}
ParallelToolCalls = other.ParallelToolCalls;
- ResponseFormat = other.ResponseFormat;
+
+ if (other.ResponseFormatObject != null)
+ {
+ ResponseFormatObject = other.ResponseFormatObject;
+ }
}
}
}
diff --git a/Runtime/Threads/RunStepResponse.cs b/Runtime/Threads/RunStepResponse.cs
index 6e6b8a97..374434c9 100644
--- a/Runtime/Threads/RunStepResponse.cs
+++ b/Runtime/Threads/RunStepResponse.cs
@@ -225,7 +225,7 @@ internal void AppendFrom(RunStepResponse other)
{
if (other == null) { return; }
- if (!string.IsNullOrWhiteSpace(Id))
+ if (!string.IsNullOrWhiteSpace(Id) && !string.IsNullOrWhiteSpace(other.Id))
{
if (Id != other.Id)
{
diff --git a/Runtime/VectorStores/ChunkingStrategy.cs b/Runtime/VectorStores/ChunkingStrategy.cs
index 131fa82d..ffae18e2 100644
--- a/Runtime/VectorStores/ChunkingStrategy.cs
+++ b/Runtime/VectorStores/ChunkingStrategy.cs
@@ -1,7 +1,6 @@
// Licensed under the MIT License. See LICENSE in the project root for license information.
using Newtonsoft.Json;
-using Newtonsoft.Json.Converters;
using UnityEngine.Scripting;
namespace OpenAI.VectorStores
diff --git a/Tests/TestFixture_00_01_Authentication.cs b/Tests/TestFixture_00_01_Authentication.cs
index b69b4064..eca62200 100644
--- a/Tests/TestFixture_00_01_Authentication.cs
+++ b/Tests/TestFixture_00_01_Authentication.cs
@@ -186,6 +186,7 @@ public void Test_11_AzureConfigurationSettings()
var auth = new OpenAIAuthentication("testKeyAaBbCcDd");
var settings = new OpenAISettings(resourceName: "test-resource", deploymentId: "deployment-id-test");
var api = new OpenAIClient(auth, settings);
+ Debug.Log(api.Settings.Info.DeploymentId);
Debug.Log(api.Settings.Info.BaseRequest);
Debug.Log(api.Settings.Info.BaseRequestUrlFormat);
}
diff --git a/Tests/TestFixture_03_Threads.cs b/Tests/TestFixture_03_Threads.cs
index 75eed24c..a27ccba0 100644
--- a/Tests/TestFixture_03_Threads.cs
+++ b/Tests/TestFixture_03_Threads.cs
@@ -537,7 +537,10 @@ public async Task Test_04_02_CreateThreadAndRun_Streaming()
public async Task Test_04_03_CreateThreadAndRun_Streaming_ToolCalls()
{
Assert.NotNull(OpenAIClient.ThreadsEndpoint);
- var tools = Tool.GetAllAvailableTools();
+ var tools = new List<Tool>
+ {
+ Tool.GetOrCreateTool(typeof(DateTimeUtility), nameof(DateTimeUtility.GetDateTime))
+ };
var assistantRequest = new CreateAssistantRequest(
instructions: "You are a helpful assistant.",
tools: tools);
@@ -712,5 +715,90 @@ public async Task Test_04_04_CreateThreadAndRun_SubmitToolOutput()
}
}
}
+
+ [Test]
+ public async Task Test_05_01_CreateThreadAndRun_StructuredOutputs_Streaming()
+ {
+ Assert.NotNull(OpenAIClient.ThreadsEndpoint);
+ var mathSchema = new JsonSchema("math_response", @"
+{
+ ""type"": ""object"",
+ ""properties"": {
+ ""steps"": {
+ ""type"": ""array"",
+ ""items"": {
+ ""type"": ""object"",
+ ""properties"": {
+ ""explanation"": {
+ ""type"": ""string""
+ },
+ ""output"": {
+ ""type"": ""string""
+ }
+ },
+ ""required"": [
+ ""explanation"",
+ ""output""
+ ],
+ ""additionalProperties"": false
+ }
+ },
+ ""final_answer"": {
+ ""type"": ""string""
+ }
+ },
+ ""required"": [
+ ""steps"",
+ ""final_answer""
+ ],
+ ""additionalProperties"": false
+}");
+ var assistant = await OpenAIClient.AssistantsEndpoint.CreateAssistantAsync(
+ new CreateAssistantRequest(
+ name: "Math Tutor",
+ instructions: "You are a helpful math tutor. Guide the user through the solution step by step.",
+ model: "gpt-4o-2024-08-06",
+ jsonSchema: mathSchema));
+ Assert.NotNull(assistant);
+ ThreadResponse thread = null;
+
+ try
+ {
+ var run = await assistant.CreateThreadAndRunAsync("how can I solve 8x + 7 = -23",
+ async @event =>
+ {
+ Debug.Log(@event.ToJsonString());
+ await Task.CompletedTask;
+ });
+
+ Assert.IsNotNull(run);
+ thread = await run.GetThreadAsync();
+ run = await run.WaitForStatusChangeAsync();
+ Assert.IsNotNull(run);
+ Assert.IsTrue(run.Status == RunStatus.Completed);
+ Debug.Log($"Created thread and run: {run.ThreadId} -> {run.Id} -> {run.CreatedAt}");
+ Assert.NotNull(thread);
+ var messages = await thread.ListMessagesAsync();
+
+ foreach (var response in messages.Items)
+ {
+ Debug.Log($"{response.Role}: {response.PrintContent()}");
+ }
+ }
+ catch (Exception e)
+ {
+ Debug.LogException(e);
+ }
+ finally
+ {
+ await assistant.DeleteAsync(deleteToolResources: thread == null);
+
+ if (thread != null)
+ {
+ var isDeleted = await thread.DeleteAsync(deleteToolResources: true);
+ Assert.IsTrue(isDeleted);
+ }
+ }
+ }
}
}
diff --git a/Tests/TestFixture_04_Chat.cs b/Tests/TestFixture_04_Chat.cs
index 383d78d3..595d17fa 100644
--- a/Tests/TestFixture_04_Chat.cs
+++ b/Tests/TestFixture_04_Chat.cs
@@ -4,6 +4,7 @@
using OpenAI.Chat;
using OpenAI.Models;
using OpenAI.Tests.Weather;
+using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
@@ -273,7 +274,7 @@ public async Task Test_02_03_ChatCompletion_Multiple_Tools_Streaming()
};
var tools = Tool.GetAllAvailableTools(false, forceUpdate: true, clearCache: true);
- var chatRequest = new ChatRequest(messages, model: Model.GPT4o, tools: tools, toolChoice: "auto");
+ var chatRequest = new ChatRequest(messages, model: Model.GPT4o, tools: tools, toolChoice: "auto", parallelToolCalls: true);
var response = await OpenAIClient.ChatEndpoint.StreamCompletionAsync(chatRequest, partialResponse =>
{
Assert.IsNotNull(partialResponse);
@@ -488,5 +489,135 @@ public async Task Test_04_02_GetChatLogProbsStreaming()
Debug.Log(response.ToString());
response.GetUsage();
}
+
+
+ [Test]
+ public async Task Test_06_01_GetChat_JsonSchema()
+ {
+ Assert.IsNotNull(OpenAIClient.ChatEndpoint);
+
+ var messages = new List<Message>
+ {
+ new(Role.System, "You are a helpful math tutor. Guide the user through the solution step by step."),
+ new(Role.User, "how can I solve 8x + 7 = -23")
+ };
+
+ var mathSchema = new JsonSchema("math_response", @"
+{
+ ""type"": ""object"",
+ ""properties"": {
+ ""steps"": {
+ ""type"": ""array"",
+ ""items"": {
+ ""type"": ""object"",
+ ""properties"": {
+ ""explanation"": {
+ ""type"": ""string""
+ },
+ ""output"": {
+ ""type"": ""string""
+ }
+ },
+ ""required"": [
+ ""explanation"",
+ ""output""
+ ],
+ ""additionalProperties"": false
+ }
+ },
+ ""final_answer"": {
+ ""type"": ""string""
+ }
+ },
+ ""required"": [
+ ""steps"",
+ ""final_answer""
+ ],
+ ""additionalProperties"": false
+}");
+ var chatRequest = new ChatRequest(messages, model: new("gpt-4o-2024-08-06"), jsonSchema: mathSchema);
+ var response = await OpenAIClient.ChatEndpoint.GetCompletionAsync(chatRequest);
+ Assert.IsNotNull(response);
+ Assert.IsNotNull(response.Choices);
+ Assert.IsNotEmpty(response.Choices);
+
+ foreach (var choice in response.Choices)
+ {
+ Debug.Log($"[{choice.Index}] {choice.Message.Role}: {choice} | Finish Reason: {choice.FinishReason}");
+ }
+
+ response.GetUsage();
+ }
+
+ [Test]
+ public async Task Test_06_01_GetChat_JsonSchema_Streaming()
+ {
+ Assert.IsNotNull(OpenAIClient.ChatEndpoint);
+
+ var messages = new List<Message>
+ {
+ new(Role.System, "You are a helpful math tutor. Guide the user through the solution step by step."),
+ new(Role.User, "how can I solve 8x + 7 = -23")
+ };
+
+ var mathSchema = new JsonSchema("math_response", @"
+{
+ ""type"": ""object"",
+ ""properties"": {
+ ""steps"": {
+ ""type"": ""array"",
+ ""items"": {
+ ""type"": ""object"",
+ ""properties"": {
+ ""explanation"": {
+ ""type"": ""string""
+ },
+ ""output"": {
+ ""type"": ""string""
+ }
+ },
+ ""required"": [
+ ""explanation"",
+ ""output""
+ ],
+ ""additionalProperties"": false
+ }
+ },
+ ""final_answer"": {
+ ""type"": ""string""
+ }
+ },
+ ""required"": [
+ ""steps"",
+ ""final_answer""
+ ],
+ ""additionalProperties"": false
+}");
+ var chatRequest = new ChatRequest(messages, model: "gpt-4o-2024-08-06", jsonSchema: mathSchema);
+ var cumulativeDelta = string.Empty;
+ var response = await OpenAIClient.ChatEndpoint.StreamCompletionAsync(chatRequest, partialResponse =>
+ {
+ Assert.IsNotNull(partialResponse);
+ if (partialResponse.Usage != null) { return; }
+ Assert.NotNull(partialResponse.Choices);
+ Assert.NotZero(partialResponse.Choices.Count);
+
+ foreach (var choice in partialResponse.Choices.Where(choice => choice.Delta?.Content != null))
+ {
+ cumulativeDelta += choice.Delta.Content;
+ }
+ }, true);
+ Assert.IsNotNull(response);
+ Assert.IsNotNull(response.Choices);
+ var choice = response.FirstChoice;
+ Assert.IsNotNull(choice);
+ Assert.IsNotNull(choice.Message);
+ Assert.IsFalse(string.IsNullOrEmpty(choice.ToString()));
+ Debug.Log($"[{choice.Index}] {choice.Message.Role}: {choice} | Finish Reason: {choice.FinishReason}");
+ Assert.IsTrue(choice.Message.Role == Role.Assistant);
+ Assert.IsTrue(choice.Message.Content!.Equals(cumulativeDelta));
+ Debug.Log(response.ToString());
+ response.GetUsage();
+ }
}
}
diff --git a/Tests/Weather/WeatherService.cs b/Tests/Weather/WeatherService.cs
index 30f2c9c1..756d04c0 100644
--- a/Tests/Weather/WeatherService.cs
+++ b/Tests/Weather/WeatherService.cs
@@ -31,4 +31,11 @@ public static async Task GetCurrentWeatherAsync(
public static int CelsiusToFahrenheit(int celsius) => (celsius * 9 / 5) + 32;
}
+
+ internal static class DateTimeUtility
+ {
+ [Function("Get the current date and time.")]
+ public static async Task<string> GetDateTime()
+ => await Task.FromResult(DateTimeOffset.Now.ToString());
+ }
}
diff --git a/package.json b/package.json
index 5e96159b..4567222b 100644
--- a/package.json
+++ b/package.json
@@ -3,7 +3,7 @@
"displayName": "OpenAI",
"description": "A OpenAI package for the Unity Game Engine to use GPT-4, GPT-3.5, GPT-3 and Dall-E though their RESTful API (currently in beta).\n\nIndependently developed, this is not an official library and I am not affiliated with OpenAI.\n\nAn OpenAI API account is required.",
"keywords": [],
- "version": "8.1.2",
+ "version": "8.2.0",
"unity": "2021.3",
"documentationUrl": "https://github.com/RageAgainstThePixel/com.openai.unity#documentation",
"changelogUrl": "https://github.com/RageAgainstThePixel/com.openai.unity/releases",
@@ -17,8 +17,8 @@
"url": "https://github.com/StephenHodgson"
},
"dependencies": {
- "com.utilities.rest": "3.2.1",
- "com.utilities.encoder.wav": "1.2.1"
+ "com.utilities.rest": "3.2.3",
+ "com.utilities.encoder.wav": "1.2.2"
},
"samples": [
{