Skip to content

Commit

Permalink
Merge pull request #79 from meysamhadeli/chore/update-config-with-usi…
Browse files Browse the repository at this point in the history
…ng-base-url

chore: simplify config for using base url
  • Loading branch information
meysamhadeli authored Nov 16, 2024
2 parents d7dc4fa + 50d41c8 commit dfdae55
Show file tree
Hide file tree
Showing 10 changed files with 435 additions and 93 deletions.
19 changes: 9 additions & 10 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -61,20 +61,19 @@ The `config` file should be like following example base on your `AI provider`:
**config.yml**
```yml
ai_provider_config:
provider_name: "openai" # openai | ollama | azure-openai
chat_completion_url: "https://api.openai.com/v1/chat/completions"
provider_name: "openai" # openai | ollama | azure-openai
base_url: "https://api.openai.com" # "http://localhost:11434" | "https://test.openai.azure.com"
chat_completion_model: "gpt-4o"
embedding_url: "https://api.openai.com/v1/embeddings" #(Optional, If you want use RAG.)
embedding_model: "text-embedding-3-small" #(Optional, If you want use RAG.)
embedding_model: "text-embedding-3-small" #(Optional, if you want to use RAG.)
chat_api_version: "2024-04-01-preview" #(Optional, If your AI provider like AzureOpenai has chat api version.)
embeddings_api_version: "2024-01-01-preview" #(Optional, If your AI provider like AzureOpenai has embeddings api version.)
temperature: 0.2
threshold: 0.3 #(Optional, If you want use RAG.)
threshold: 0.3 #(Optional, if you want to use RAG.)
theme: "dracula"
rag: true #(Optional, If you want use RAG.)
rag: true #(Optional, if you want to use RAG.)
```
> Note: For the `ollama` provider, use the base url `http://localhost:11434` and set your desired ollama models as the chat and embedding models.

> Note: We used the standard integration of [OpenAI APIs](https://platform.openai.com/docs/api-reference/introduction) and [Ollama APIs](https://github.com/ollama/ollama/blob/main/docs/api.md) and you can find more details in documentation of each APIs.
> Note: We used the standard integration of [OpenAI APIs](https://platform.openai.com/docs/api-reference/introduction), [Ollama APIs](https://github.com/ollama/ollama/blob/main/docs/api.md) and [Azure OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/reference); you can find more details in the documentation of each API.
If you wish to customize your configuration, you can create your own `config.yml` file and place it in the `root directory` of `each project` you want to analyze with codai. If `no configuration` file is provided, codai will use the `default settings`.

Expand All @@ -84,7 +83,7 @@ codai code --config ./config.yml
```
Additionally, you can pass configuration options directly in the command line. For example:
```bash
codai code --provider_name openapi --temperature 0.8
codai code --provider_name openai --temperature 0.8 --chat_api_key test-chat-key --embeddings_api_key test-embeddings-key
```
This flexibility allows you to customize config of codai on the fly.

Expand Down
45 changes: 25 additions & 20 deletions config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,21 +19,22 @@ type Config struct {

// Default configuration values
var defaultConfig = Config{
Version: "1.6.3",
Version: "1.7.1",
Theme: "dracula",
RAG: true,
AIProviderConfig: &providers.AIProviderConfig{
ProviderName: "openai",
EmbeddingURL: "https://api.openai.com/v1/embeddings",
ChatCompletionURL: "https://api.openai.com/v1/chat/completions",
ChatCompletionModel: "gpt-4o",
EmbeddingModel: "text-embedding-3-small",
Stream: true,
EncodingFormat: "float",
Temperature: 0.2,
Threshold: 0,
ChatApiKey: "",
EmbeddingsApiKey: "",
ProviderName: "openai",
BaseURL: "https://api.openai.com",
ChatCompletionModel: "gpt-4o",
EmbeddingModel: "text-embedding-3-small",
Stream: true,
EncodingFormat: "float",
Temperature: 0.2,
Threshold: 0,
ChatApiVersion: "",
EmbeddingsApiVersion: "",
ChatApiKey: "",
EmbeddingsApiKey: "",
},
}

Expand All @@ -49,15 +50,16 @@ func LoadConfigs(rootCmd *cobra.Command, cwd string) *Config {
viper.SetDefault("theme", defaultConfig.Theme)
viper.SetDefault("rag", defaultConfig.RAG)
viper.SetDefault("ai_provider_config.provider_name", defaultConfig.AIProviderConfig.ProviderName)
viper.SetDefault("ai_provider_config.embedding_url", defaultConfig.AIProviderConfig.EmbeddingURL)
viper.SetDefault("ai_provider_config.chat_completion_url", defaultConfig.AIProviderConfig.ChatCompletionURL)
viper.SetDefault("ai_provider_config.base_url", defaultConfig.AIProviderConfig.BaseURL)
viper.SetDefault("ai_provider_config.chat_completion_model", defaultConfig.AIProviderConfig.ChatCompletionModel)
viper.SetDefault("ai_provider_config.embedding_model", defaultConfig.AIProviderConfig.EmbeddingModel)
viper.SetDefault("ai_provider_config.encoding_format", defaultConfig.AIProviderConfig.EncodingFormat)
viper.SetDefault("ai_provider_config.temperature", defaultConfig.AIProviderConfig.Temperature)
viper.SetDefault("ai_provider_config.threshold", defaultConfig.AIProviderConfig.Threshold)
viper.SetDefault("ai_provider_config.chat_api_key", defaultConfig.AIProviderConfig.ChatApiKey)
viper.SetDefault("ai_provider_config.embeddings_api_key", defaultConfig.AIProviderConfig.EmbeddingsApiKey)
viper.SetDefault("ai_provider_config.chat_api_version", defaultConfig.AIProviderConfig.ChatApiVersion)
viper.SetDefault("ai_provider_config.embeddings_api_version", defaultConfig.AIProviderConfig.EmbeddingsApiVersion)

// Automatically read environment variables
viper.AutomaticEnv() // This will look for variables that match config keys directly
Expand Down Expand Up @@ -105,29 +107,31 @@ func bindEnv() {
_ = viper.BindEnv("theme", "THEME")
_ = viper.BindEnv("rag", "RAG")
_ = viper.BindEnv("ai_provider_config.provider_name", "PROVIDER_NAME")
_ = viper.BindEnv("ai_provider_config.embedding_url", "EMBEDDING_URL")
_ = viper.BindEnv("ai_provider_config.chat_completion_url", "CHAT_COMPLETION_URL")
_ = viper.BindEnv("ai_provider_config.base_url", "BASE_URL")
_ = viper.BindEnv("ai_provider_config.chat_completion_model", "CHAT_COMPLETION_MODEL")
_ = viper.BindEnv("ai_provider_config.embedding_model", "EMBEDDING_MODEL")
_ = viper.BindEnv("ai_provider_config.temperature", "TEMPERATURE")
_ = viper.BindEnv("ai_provider_config.threshold", "THRESHOLD")
_ = viper.BindEnv("ai_provider_config.chat_api_key", "CHAT_API_KEY")
_ = viper.BindEnv("ai_provider_config.embeddings_api_key", "EMBEDDINGS_API_KEY")
_ = viper.BindEnv("ai_provider_config.chat_api_version", "CHAT_API_VERSION")
_ = viper.BindEnv("ai_provider_config.embeddings_api_version", "EMBEDDINGS_API_VERSION")
}

// bindFlags binds the CLI flags to configuration values.
func bindFlags(rootCmd *cobra.Command) {
_ = viper.BindPFlag("theme", rootCmd.Flags().Lookup("theme"))
_ = viper.BindPFlag("rag", rootCmd.Flags().Lookup("rag"))
_ = viper.BindPFlag("ai_provider_config.provider_name", rootCmd.Flags().Lookup("provider_name"))
_ = viper.BindPFlag("ai_provider_config.embedding_url", rootCmd.Flags().Lookup("embedding_url"))
_ = viper.BindPFlag("ai_provider_config.chat_completion_url", rootCmd.Flags().Lookup("chat_completion_url"))
_ = viper.BindPFlag("ai_provider_config.base_url", rootCmd.Flags().Lookup("base_url"))
_ = viper.BindPFlag("ai_provider_config.chat_completion_model", rootCmd.Flags().Lookup("chat_completion_model"))
_ = viper.BindPFlag("ai_provider_config.embedding_model", rootCmd.Flags().Lookup("embedding_model"))
_ = viper.BindPFlag("ai_provider_config.temperature", rootCmd.Flags().Lookup("temperature"))
_ = viper.BindPFlag("ai_provider_config.threshold", rootCmd.Flags().Lookup("threshold"))
_ = viper.BindPFlag("ai_provider_config.chat_api_key", rootCmd.Flags().Lookup("chat_api_key"))
_ = viper.BindPFlag("ai_provider_config.embeddings_api_key", rootCmd.Flags().Lookup("embeddings_api_key"))
_ = viper.BindPFlag("ai_provider_config.chat_api_version", rootCmd.Flags().Lookup("chat_api_version"))
_ = viper.BindPFlag("ai_provider_config.embeddings_api_version", rootCmd.Flags().Lookup("embeddings_api_version"))
}

// InitFlags initializes the flags for the root command.
Expand All @@ -138,12 +142,13 @@ func InitFlags(rootCmd *cobra.Command) {
rootCmd.PersistentFlags().Bool("rag", defaultConfig.RAG, "Enable Retrieval-Augmented Generation (RAG) for enhanced responses using relevant data retrieval (e.g., default is 'enabled' and just retrieve related context base on user request).")
rootCmd.PersistentFlags().StringP("version", "v", defaultConfig.Version, "Specifies the version of the application or service. This helps to track the release or update of the software.")
rootCmd.PersistentFlags().StringP("provider_name", "p", defaultConfig.AIProviderConfig.ProviderName, "Specifies the name of the AI service provider (e.g., 'openai' or 'ollama'). This determines which service or API will be used for AI-related functions.")
rootCmd.PersistentFlags().String("embedding_url", defaultConfig.AIProviderConfig.EmbeddingURL, "The API endpoint used for text embedding requests. This URL points to the server that processes and returns text embeddings.")
rootCmd.PersistentFlags().String("chat_completion_url", defaultConfig.AIProviderConfig.ChatCompletionURL, "The API endpoint for chat completion requests. This URL is where chat messages are sent to receive AI-generated responses.")
rootCmd.PersistentFlags().String("base_url", defaultConfig.AIProviderConfig.BaseURL, "The base URL of the AI provider (e.g., default is 'https://api.openai.com').")
rootCmd.PersistentFlags().String("chat_completion_model", defaultConfig.AIProviderConfig.ChatCompletionModel, "The name of the model used for chat completions, such as 'gpt-4o'. Different models offer varying levels of performance and capabilities.")
rootCmd.PersistentFlags().String("embedding_model", defaultConfig.AIProviderConfig.EmbeddingModel, "Specifies the AI model used for generating text embeddings (e.g., 'text-embedding-ada-002'). This model converts text into vector representations for similarity comparisons.")
rootCmd.PersistentFlags().Float32("temperature", defaultConfig.AIProviderConfig.Temperature, "Adjusts the AI model’s creativity by setting a temperature value. Higher values result in more creative or varied responses, while lower values make them more focused (e.g., value should be between '0 - 1' and default is '0.2').")
rootCmd.PersistentFlags().Float64("threshold", defaultConfig.AIProviderConfig.Threshold, "Sets the threshold for similarity calculations in AI systems. Higher values will require closer matches and should be careful not to lose matches, while lower values provide a wider range of results to prevent losing any matches. (e.g., value should be between '0.2 - 1' and default is '0.3').")
rootCmd.PersistentFlags().String("chat_api_key", defaultConfig.AIProviderConfig.ChatApiKey, "The chat API key used to authenticate with the AI service provider.")
rootCmd.PersistentFlags().String("embeddings_api_key", defaultConfig.AIProviderConfig.EmbeddingsApiKey, "The embeddings API key used to authenticate with the AI service provider.")
rootCmd.PersistentFlags().String("chat_api_version", defaultConfig.AIProviderConfig.ChatApiVersion, "The API version used to authenticate with the chat AI service provider.")
rootCmd.PersistentFlags().String("embeddings_api_version", defaultConfig.AIProviderConfig.EmbeddingsApiVersion, "The API version used to authenticate with the embeddings AI service provider.")
}
70 changes: 43 additions & 27 deletions providers/ai_provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,24 +2,26 @@ package providers

import (
"errors"
azure_openai "github.com/meysamhadeli/codai/providers/azure-openai"
"github.com/meysamhadeli/codai/providers/contracts"
"github.com/meysamhadeli/codai/providers/ollama"
"github.com/meysamhadeli/codai/providers/openai"
)

type AIProviderConfig struct {
ProviderName string `mapstructure:"provider_name"`
EmbeddingURL string `mapstructure:"embedding_url"`
ChatCompletionURL string `mapstructure:"chat_completion_url"`
EmbeddingModel string `mapstructure:"embedding_model"`
ChatCompletionModel string `mapstructure:"chat_completion_model"`
Stream bool `mapstructure:"stream"`
Temperature float32 `mapstructure:"temperature"`
EncodingFormat string `mapstructure:"encoding_format"`
MaxTokens int `mapstructure:"max_tokens"`
Threshold float64 `mapstructure:"threshold"`
ChatApiKey string `mapstructure:"chat_api_key"`
EmbeddingsApiKey string `mapstructure:"embeddings_api_key"`
ProviderName string `mapstructure:"provider_name"`
BaseURL string `mapstructure:"base_url"`
EmbeddingModel string `mapstructure:"embedding_model"`
ChatCompletionModel string `mapstructure:"chat_completion_model"`
Stream bool `mapstructure:"stream"`
Temperature float32 `mapstructure:"temperature"`
EncodingFormat string `mapstructure:"encoding_format"`
MaxTokens int `mapstructure:"max_tokens"`
Threshold float64 `mapstructure:"threshold"`
ChatApiKey string `mapstructure:"chat_api_key"`
EmbeddingsApiKey string `mapstructure:"embeddings_api_key"`
ChatApiVersion string `mapstructure:"chat_api_version"`
EmbeddingsApiVersion string `mapstructure:"embeddings_api_version"`
}

// ProviderFactory creates a Provider based on the given provider config.
Expand All @@ -31,26 +33,40 @@ func ProviderFactory(config *AIProviderConfig, tokenManagement contracts.ITokenM
EncodingFormat: config.EncodingFormat,
ChatCompletionModel: config.ChatCompletionModel,
EmbeddingModel: config.EmbeddingModel,
ChatCompletionURL: config.ChatCompletionURL,
EmbeddingURL: config.EmbeddingURL,
BaseURL: config.BaseURL,
MaxTokens: config.MaxTokens,
Threshold: config.Threshold,
TokenManagement: tokenManagement,
}), nil
case "openai", "azure-openai":

case "openai":
return openai.NewOpenAIProvider(&openai.OpenAIConfig{
Temperature: config.Temperature,
EncodingFormat: config.EncodingFormat,
ChatCompletionModel: config.ChatCompletionModel,
EmbeddingModel: config.EmbeddingModel,
ChatCompletionURL: config.ChatCompletionURL,
EmbeddingURL: config.EmbeddingURL,
ChatApiKey: config.ChatApiKey,
EmbeddingsApiKey: config.EmbeddingsApiKey,
MaxTokens: config.MaxTokens,
Threshold: config.Threshold,
TokenManagement: tokenManagement,
Temperature: config.Temperature,
EncodingFormat: config.EncodingFormat,
ChatCompletionModel: config.ChatCompletionModel,
EmbeddingModel: config.EmbeddingModel,
BaseURL: config.BaseURL,
ChatApiKey: config.ChatApiKey,
EmbeddingsApiKey: config.EmbeddingsApiKey,
MaxTokens: config.MaxTokens,
Threshold: config.Threshold,
TokenManagement: tokenManagement,
ChatApiVersion: config.ChatApiVersion,
EmbeddingsApiVersion: config.EmbeddingsApiVersion,
}), nil
case "azure-openai", "azure_openai":
return azure_openai.NewAzureOpenAIProvider(&azure_openai.AzureOpenAIConfig{
Temperature: config.Temperature,
EncodingFormat: config.EncodingFormat,
ChatCompletionModel: config.ChatCompletionModel,
EmbeddingModel: config.EmbeddingModel,
BaseURL: config.BaseURL,
ChatApiKey: config.ChatApiKey,
EmbeddingsApiKey: config.EmbeddingsApiKey,
MaxTokens: config.MaxTokens,
Threshold: config.Threshold,
TokenManagement: tokenManagement,
ChatApiVersion: config.ChatApiVersion,
EmbeddingsApiVersion: config.EmbeddingsApiVersion,
}), nil
default:

Expand Down
Loading

0 comments on commit dfdae55

Please sign in to comment.