Skip to content

Commit

Permalink
com.openai.unity 3.0.0 (#36)
Browse files Browse the repository at this point in the history
Closes #35
  • Loading branch information
StephenHodgson authored Mar 2, 2023
1 parent f15fe84 commit 0933666
Show file tree
Hide file tree
Showing 25 changed files with 495 additions and 12 deletions.
35 changes: 33 additions & 2 deletions Documentation~/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,10 @@ The recommended installation method is though the unity package manager and [Ope
- [Retrieve Models](#retrieve-model)
- [Delete Fine Tuned Model](#delete-fine-tuned-model)
- [Completions](#completions)
- [Streaming](#streaming)
- [Streaming](#completion-streaming)
- [Chat](#chat)
- [Chat Completions](#chat-completions)
- [Streaming](#chat-streaming)
- [Edits](#edits)
- [Create Edit](#create-edit)
- [Embeddings](#embeddings)
Expand Down Expand Up @@ -200,7 +203,7 @@ Debug.Log(result);

> To get the `CompletionResult` (which is mostly metadata), use its implicit string operator to get the text if all you want is the completion choice.
#### Streaming
#### Completion Streaming

Streaming allows you to get results as they are generated, which can help your application feel more responsive, especially on slow models like Davinci.

Expand All @@ -226,6 +229,34 @@ await foreach (var token in api.CompletionsEndpoint.StreamCompletionEnumerableAs
}
```

### [Chat](https://platform.openai.com/docs/api-reference/chat)

Given a chat conversation, the model will return a chat completion response.

#### [Chat Completions](https://platform.openai.com/docs/api-reference/chat/create)

Creates a completion for the chat message

```csharp
var api = new OpenAIClient(OpenAIAuthentication.LoadFromEnv());
var chatPrompts = new List<ChatPrompt>
{
new ChatPrompt("system", "You are a helpful assistant."),
new ChatPrompt("user", "Who won the world series in 2020?"),
new ChatPrompt("assistant", "The Los Angeles Dodgers won the World Series in 2020."),
new ChatPrompt("user", "Where was it played?"),
};
var chatRequest = new ChatRequest(chatPrompts);
var result = await api.ChatEndpoint.GetCompletionAsync(chatRequest);
Debug.Log(result.FirstChoice);
```

##### [Chat Streaming](https://platform.openai.com/docs/api-reference/chat/create#chat/create-stream)
```csharp
TODO
```

### [Edits](https://beta.openai.com/docs/api-reference/edits)
Given a prompt and an instruction, the model will return an edited version of the prompt.
Expand Down
8 changes: 8 additions & 0 deletions Runtime/Chat.meta

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

32 changes: 32 additions & 0 deletions Runtime/Chat/ChatEndpoint.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
// Licensed under the MIT License. See LICENSE in the project root for license information.

using Newtonsoft.Json;
using System.Threading;
using System.Threading.Tasks;

namespace OpenAI.Chat
{
    /// <summary>
    /// Given a chat conversation, the model will return a chat completion response.<br/>
    /// <see href="https://platform.openai.com/docs/api-reference/chat"/>
    /// </summary>
    public sealed class ChatEndpoint : BaseEndPoint
    {
        public ChatEndpoint(OpenAIClient api) : base(api) { }

        protected override string GetEndpoint()
            => $"{Api.BaseUrl}chat";

        /// <summary>
        /// Creates a completion for the chat message
        /// </summary>
        /// <param name="chatRequest">The chat request which contains the message content.</param>
        /// <param name="cancellationToken">Optional, <see cref="CancellationToken"/>.</param>
        /// <returns><see cref="ChatResponse"/>.</returns>
        /// <exception cref="System.ArgumentNullException">Raised when <paramref name="chatRequest"/> is null.</exception>
        public async Task<ChatResponse> GetCompletionAsync(ChatRequest chatRequest, CancellationToken cancellationToken = default)
        {
            // Fail fast with a clear exception rather than an NRE inside the serializer.
            if (chatRequest == null)
            {
                throw new System.ArgumentNullException(nameof(chatRequest));
            }

            var payload = JsonConvert.SerializeObject(chatRequest, Api.JsonSerializationOptions).ToJsonStringContent();
            var result = await Api.Client.PostAsync($"{GetEndpoint()}/completions", payload, cancellationToken);
            var resultAsString = await result.ReadAsStringAsync(true);
            return JsonConvert.DeserializeObject<ChatResponse>(resultAsString, Api.JsonSerializationOptions);
        }

        // TODO Streaming endpoints
    }
}
11 changes: 11 additions & 0 deletions Runtime/Chat/ChatEndpoint.cs.meta

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

27 changes: 27 additions & 0 deletions Runtime/Chat/ChatPrompt.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
// Licensed under the MIT License. See LICENSE in the project root for license information.

using Newtonsoft.Json;

namespace OpenAI.Chat
{
    /// <summary>
    /// A single message in a chat conversation, pairing a role
    /// (e.g. "system", "user", "assistant") with its text content.
    /// </summary>
    public sealed class ChatPrompt
    {
        /// <summary>
        /// Constructor.
        /// </summary>
        /// <param name="role">The author role of this message.</param>
        /// <param name="content">The text content of this message.</param>
        [JsonConstructor]
        public ChatPrompt([JsonProperty("role")] string role, [JsonProperty("content")] string content)
        {
            Role = role;
            Content = content;
        }

        /// <summary>
        /// The author role of this message.
        /// </summary>
        [JsonProperty("role")]
        public string Role { get; }

        /// <summary>
        /// The text content of this message.
        /// </summary>
        [JsonProperty("content")]
        public string Content { get; }

        public override string ToString() => JsonConvert.SerializeObject(this);
    }
}
11 changes: 11 additions & 0 deletions Runtime/Chat/ChatPrompt.cs.meta

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

133 changes: 133 additions & 0 deletions Runtime/Chat/ChatRequest.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
// Licensed under the MIT License. See LICENSE in the project root for license information.

using Newtonsoft.Json;
using OpenAI.Models;
using System;
using System.Collections.Generic;
using System.Linq;

namespace OpenAI.Chat
{
    /// <summary>
    /// The request body for a chat completion.<br/>
    /// <see href="https://platform.openai.com/docs/api-reference/chat/create"/>
    /// </summary>
    public sealed class ChatRequest
    {
        /// <summary>
        /// Constructor.
        /// </summary>
        /// <param name="messages">The messages to generate chat completions for, in the chat format.</param>
        /// <param name="model">Optional, the model to use. Defaults to gpt-3.5-turbo.</param>
        /// <param name="temperature">Optional, sampling temperature between 0 and 2.</param>
        /// <param name="topP">Optional, nucleus sampling probability mass.</param>
        /// <param name="number">Optional, how many chat completion choices to generate per input message.</param>
        /// <param name="stops">Optional, up to 4 sequences where the API will stop generating further tokens.</param>
        /// <param name="presencePenalty">Optional, presence penalty between -2.0 and 2.0.</param>
        /// <param name="frequencyPenalty">Optional, frequency penalty between -2.0 and 2.0.</param>
        /// <param name="user">Optional, unique end-user identifier to help OpenAI monitor and detect abuse.</param>
        /// <exception cref="ArgumentNullException">Raised when <paramref name="messages"/> is null.</exception>
        /// <exception cref="ArgumentException">Raised when <paramref name="model"/> is not a supported chat model.</exception>
        [JsonConstructor]
        public ChatRequest(
            [JsonProperty("messages")] IEnumerable<ChatPrompt> messages,
            [JsonProperty("model")] Model model = null,
            double? temperature = null,
            double? topP = null,
            int? number = null,
            string[] stops = null,
            double? presencePenalty = null,
            double? frequencyPenalty = null,
            string user = null)
        {
            if (messages == null)
            {
                throw new ArgumentNullException(nameof(messages));
            }

            const string defaultModel = "gpt-3.5-turbo";
            Model = model ?? new Model(defaultModel);

            if (!Model.Contains(defaultModel))
            {
                // ArgumentException(message, paramName): message first, parameter name second.
                throw new ArgumentException($"{Model} is not supported", nameof(model));
            }

            Messages = messages.ToList();
            Temperature = temperature;
            TopP = topP;
            Number = number;
            Stops = stops;
            PresencePenalty = presencePenalty;
            FrequencyPenalty = frequencyPenalty;
            User = user;
        }

        /// <summary>
        /// ID of the model to use. Currently, only gpt-3.5-turbo and gpt-3.5-turbo-0301 are supported.
        /// </summary>
        [JsonProperty("model")]
        private string Model { get; }

        /// <summary>
        /// The messages to generate chat completions for, in the chat format.
        /// </summary>
        [JsonProperty("messages")]
        public IReadOnlyList<ChatPrompt> Messages { get; }

        /// <summary>
        /// What sampling temperature to use, between 0 and 2.
        /// Higher values like 0.8 will make the output more random, while lower values like 0.2 will
        /// make it more focused and deterministic.
        /// We generally recommend altering this or top_p but not both.<br/>
        /// Defaults to 1
        /// </summary>
        [JsonProperty("temperature")]
        public double? Temperature { get; }

        /// <summary>
        /// An alternative to sampling with temperature, called nucleus sampling,
        /// where the model considers the results of the tokens with top_p probability mass.
        /// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
        /// We generally recommend altering this or temperature but not both.<br/>
        /// Defaults to 1
        /// </summary>
        [JsonProperty("top_p")]
        public double? TopP { get; }

        /// <summary>
        /// How many chat completion choices to generate for each input message.<br/>
        /// Defaults to 1
        /// </summary>
        /// <remarks>
        /// The API wire name for this setting is "n"; serializing it as "number"
        /// would cause the server to silently ignore it.
        /// </remarks>
        [JsonProperty("n")]
        public int? Number { get; }

        /// <summary>
        /// Specifies where the results should stream and be returned at one time.
        /// Do not set this yourself, use the appropriate methods on <see cref="ChatEndpoint"/> instead.<br/>
        /// Defaults to false
        /// </summary>
        [JsonProperty("stream")]
        public bool Stream { get; internal set; }

        /// <summary>
        /// Up to 4 sequences where the API will stop generating further tokens.
        /// </summary>
        [JsonProperty("stop")]
        public string[] Stops { get; }

        /// <summary>
        /// Number between -2.0 and 2.0.
        /// Positive values penalize new tokens based on whether they appear in the text so far,
        /// increasing the model's likelihood to talk about new topics.<br/>
        /// Defaults to 0
        /// </summary>
        [JsonProperty("presence_penalty")]
        public double? PresencePenalty { get; }

        /// <summary>
        /// Number between -2.0 and 2.0.
        /// Positive values penalize new tokens based on their existing frequency in the text so far,
        /// decreasing the model's likelihood to repeat the same line verbatim.<br/>
        /// Defaults to 0
        /// </summary>
        [JsonProperty("frequency_penalty")]
        public double? FrequencyPenalty { get; }

        /// <summary>Modify the likelihood of specified tokens appearing in the completion.
        /// Accepts a json object that maps tokens(specified by their token ID in the tokenizer)
        /// to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits
        /// generated by the model prior to sampling.The exact effect will vary per model, but values between
        /// -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result
        /// in a ban or exclusive selection of the relevant token.<br/>
        /// Defaults to null
        /// </summary>
        [JsonProperty("logit_bias")]
        public Dictionary<string, double> LogitBias { get; set; }

        /// <summary>
        /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
        /// </summary>
        [JsonProperty("user")]
        public string User { get; }

        public override string ToString() => JsonConvert.SerializeObject(this);
    }
}
11 changes: 11 additions & 0 deletions Runtime/Chat/ChatRequest.cs.meta

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

52 changes: 52 additions & 0 deletions Runtime/Chat/ChatResponse.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
// Licensed under the MIT License. See LICENSE in the project root for license information.

using Newtonsoft.Json;
using System.Collections.Generic;
using System.Linq;

namespace OpenAI.Chat
{
    /// <summary>
    /// A chat completion response returned by the chat endpoint.
    /// </summary>
    public sealed class ChatResponse
    {
        /// <summary>
        /// Constructor.
        /// </summary>
        /// <param name="id">Unique identifier of this response.</param>
        /// <param name="object">The object type, e.g. "chat.completion".</param>
        /// <param name="created">Unix timestamp (seconds) of when the response was created.</param>
        /// <param name="model">The model that generated the response.</param>
        /// <param name="usage">Token usage statistics for the request.</param>
        /// <param name="choices">The list of generated completion choices.</param>
        [JsonConstructor]
        public ChatResponse(
            [JsonProperty("id")] string id,
            [JsonProperty("object")] string @object,
            [JsonProperty("created")] int created,
            [JsonProperty("model")] string model,
            [JsonProperty("usage")] Usage usage,
            [JsonProperty("choices")] List<Choice> choices
        )
        {
            Id = id;
            Object = @object;
            Created = created;
            Model = model;
            Usage = usage;
            Choices = choices;
        }

        [JsonProperty("id")]
        public string Id { get; }

        [JsonProperty("object")]
        public string Object { get; }

        [JsonProperty("created")]
        public int Created { get; }

        [JsonProperty("model")]
        public string Model { get; }

        [JsonProperty("usage")]
        public Usage Usage { get; }

        [JsonProperty("choices")]
        public IReadOnlyList<Choice> Choices { get; }

        /// <summary>
        /// Convenience accessor for the first completion choice,
        /// or null when the payload contained no choices.
        /// </summary>
        [JsonIgnore]
        public Choice FirstChoice => Choices?.FirstOrDefault();

        public override string ToString() => JsonConvert.SerializeObject(this);
    }
}
11 changes: 11 additions & 0 deletions Runtime/Chat/ChatResponse.cs.meta

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

33 changes: 33 additions & 0 deletions Runtime/Chat/Choice.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
// Licensed under the MIT License. See LICENSE in the project root for license information.

using Newtonsoft.Json;

namespace OpenAI.Chat
{
    /// <summary>
    /// A single completion choice within a <see cref="ChatResponse"/>.
    /// </summary>
    public sealed class Choice
    {
        /// <summary>
        /// Constructor.
        /// </summary>
        /// <param name="message">The generated chat message.</param>
        /// <param name="finishReason">Why generation stopped, e.g. "stop" or "length".</param>
        /// <param name="index">The index of this choice in the response.</param>
        [JsonConstructor]
        public Choice(
            [JsonProperty("message")] Message message,
            [JsonProperty("finish_reason")] string finishReason,
            [JsonProperty("index")] int index)
        {
            Message = message;
            FinishReason = finishReason;
            Index = index;
        }

        [JsonProperty("message")]
        public Message Message { get; }

        [JsonProperty("finish_reason")]
        public string FinishReason { get; }

        [JsonProperty("index")]
        public int Index { get; }

        // Message may be absent in some payloads; render as empty rather than throwing.
        public override string ToString() => Message?.ToString() ?? string.Empty;

        public static implicit operator string(Choice choice) => choice?.ToString();
    }
}
Loading

0 comments on commit 0933666

Please sign in to comment.