Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Unified Configuration File in TOML Format #1174

Draft
wants to merge 11 commits into
base: main
Choose a base branch
from
36 changes: 36 additions & 0 deletions config.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
# config.toml
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm confused, isn't this configuration file gpt-engineer specific?

It would probably be a confusingly general name for most projects.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Fully agreed. I will do that in the following commits.

# Unified configuration file for the gpt-engineer project

# API Configuration
[API]
# API key for OpenAI
# OPENAI_API_KEY=Your personal OpenAI API key from https://platform.openai.com/account/api-keys
OPENAI_API_KEY = "your_api_key_here"
ANTHROPIC_API_KEY = "your_anthropic_api_key_here"

# Model configurations
[model]
model_name = "gpt-4o"
# Controls randomness: lower values for more focused, deterministic outputs
temperature = 0.1
# Endpoint for your Azure OpenAI Service (https://xx.openai.azure.com).
# In that case, the given model is the deployment name chosen in the Azure AI Studio.
azure_endpoint = ""

# Improve Mode Configuration
[improve]
# Linting with BLACK (Python) enhances code suggestions from LLMs.
# Enable or disable linting (true/false)
is_linting = false
# Enable or disable file selection. "true" will open your default editor to select the file. (true/false)
is_file_selection = true

# Git Filter Configuration
[git_filter]
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'd suggest this section be renamed.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Fully agreed. Once all the functions are finalized, we'll rename this section and revise all the names in a subsequent commit. For now, I've just placed some placeholders here to help identify and locate all configurations.

# File extension settings for the git filter
file_extensions = ["py", "toml", "md"]

# Self-Healing Mechanism Configuration
[self_healing]
# Number of retry attempts for self-healing mechanisms (0-2)
retry_attempts = 1
31 changes: 2 additions & 29 deletions gpt_engineer/applications/cli/file_selector.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,15 +53,11 @@ class FileSelector:
IGNORE_FOLDERS = {"site-packages", "node_modules", "venv", "__pycache__"}
FILE_LIST_NAME = "file_selection.toml"
COMMENT = (
"# Remove '#' to select a file or turn off linting.\n\n"
"# Linting with BLACK (Python) enhances code suggestions from LLMs. "
"To disable linting, uncomment the relevant option in the linting settings.\n\n"
"# Remove '#' to select a file\n\n"
"# gpt-engineer can only read selected files. "
"Including irrelevant files will degrade performance, "
"cost additional tokens and potentially overflow token limit.\n\n"
)
LINTING_STRING = '[linting]\n# "linting" = "off"\n\n'
is_linting = True

def __init__(self, project_path: Union[str, Path]):
"""
Expand Down Expand Up @@ -118,7 +114,7 @@ def ask_for_files(self, skip_file_selection=False) -> tuple[FilesDict, bool]:
except UnicodeDecodeError:
print(f"Warning: File not UTF-8 encoded {file_path}, skipping")

return FilesDict(content_dict), self.is_linting
return FilesDict(content_dict)

def editor_file_selector(
self, input_path: Union[str, Path], init: bool = True
Expand Down Expand Up @@ -160,25 +156,13 @@ def editor_file_selector(
# Write to the toml file
with open(toml_file, "w") as f:
f.write(self.COMMENT)
f.write(self.LINTING_STRING)
f.write(s)

else:
# Load existing files from the .toml configuration
all_files = self.get_current_files(root_path)
s = toml.dumps({"files": {x: "selected" for x in all_files}})

# get linting status from the toml file
with open(toml_file, "r") as file:
linting_status = toml.load(file)
if (
"linting" in linting_status
and linting_status["linting"].get("linting", "").lower() == "off"
):
self.is_linting = False
self.LINTING_STRING = '[linting]\n"linting" = "off"\n\n'
print("\nLinting is disabled")

with open(toml_file, "r") as file:
selected_files = toml.load(file)

Expand All @@ -196,7 +180,6 @@ def editor_file_selector(
# Write the merged list back to the .toml for user review and modification
with open(toml_file, "w") as file:
file.write(self.COMMENT) # Ensure to write the comment
file.write(self.LINTING_STRING)
file.write(s)

print(
Expand Down Expand Up @@ -294,16 +277,6 @@ def get_files_from_toml(
selected_files = []
edited_tree = toml.load(toml_file) # Load the edited .toml file

# check if users have disabled linting or not
if (
"linting" in edited_tree
and edited_tree["linting"].get("linting", "").lower() == "off"
):
self.is_linting = False
print("\nLinting is disabled")
else:
self.is_linting = True

# Iterate through the files in the .toml and append selected files to the list
for file, _ in edited_tree["files"].items():
selected_files.append(file)
Expand Down
136 changes: 94 additions & 42 deletions gpt_engineer/applications/cli/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
import sys

from pathlib import Path
from typing import Optional

import openai
import typer
Expand All @@ -60,6 +61,7 @@
from gpt_engineer.core.files_dict import FilesDict
from gpt_engineer.core.git import stage_uncommitted_to_git
from gpt_engineer.core.preprompts_holder import PrepromptsHolder
from gpt_engineer.core.project_config import Config
from gpt_engineer.core.prompt import Prompt
from gpt_engineer.tools.custom_steps import clarified_gen, lite_gen, self_heal

Expand Down Expand Up @@ -280,80 +282,72 @@ def format_installed_packages(packages):
)
def main(
project_path: str = typer.Argument(".", help="path"),
model: str = typer.Option(
os.environ.get("MODEL_NAME", "gpt-4o"), "--model", "-m", help="model id string"
),
temperature: float = typer.Option(
0.1,
model: Optional[str] = typer.Option(None, "--model", "-m", help="model id string"),
temperature: Optional[float] = typer.Option(
None,
"--temperature",
"-t",
help="Controls randomness: lower values for more focused, deterministic outputs",
),
improve_mode: bool = typer.Option(
False,
improve_mode: Optional[bool] = typer.Option(
None,
"--improve",
"-i",
help="Improve an existing project by modifying the files.",
),
lite_mode: bool = typer.Option(
False,
lite_mode: Optional[bool] = typer.Option(
None,
"--lite",
"-l",
help="Lite mode: run a generation using only the main prompt.",
),
clarify_mode: bool = typer.Option(
False,
clarify_mode: Optional[bool] = typer.Option(
None,
"--clarify",
"-c",
help="Clarify mode - discuss specification with AI before implementation.",
),
self_heal_mode: bool = typer.Option(
False,
self_heal_mode: Optional[bool] = typer.Option(
None,
"--self-heal",
"-sh",
help="Self-heal mode - fix the code by itself when it fails.",
),
azure_endpoint: str = typer.Option(
"",
azure_endpoint: Optional[str] = typer.Option(
None,
"--azure",
"-a",
help="""Endpoint for your Azure OpenAI Service (https://xx.openai.azure.com).
In that case, the given model is the deployment name chosen in the Azure AI Studio.""",
help="Endpoint for your Azure OpenAI Service (https://xx.openai.azure.com). In that case, the given model is the deployment name chosen in the Azure AI Studio.",
),
use_custom_preprompts: bool = typer.Option(
False,
use_custom_preprompts: Optional[bool] = typer.Option(
None,
"--use-custom-preprompts",
help="""Use your project's custom preprompts instead of the default ones.
Copies all original preprompts to the project's workspace if they don't exist there.""",
help="Use your project's custom preprompts instead of the default ones. Copies all original preprompts to the project's workspace if they don't exist there.",
),
llm_via_clipboard: bool = typer.Option(
False,
llm_via_clipboard: Optional[bool] = typer.Option(
None,
"--llm-via-clipboard",
help="Use the clipboard to communicate with the AI.",
),
verbose: bool = typer.Option(
False, "--verbose", "-v", help="Enable verbose logging for debugging."
verbose: Optional[bool] = typer.Option(
None, "--verbose", "-v", help="Enable verbose logging for debugging."
),
debug: bool = typer.Option(
False, "--debug", "-d", help="Enable debug mode for debugging."
debug: Optional[bool] = typer.Option(
None, "--debug", "-d", help="Enable debug mode for debugging."
),
prompt_file: str = typer.Option(
"prompt",
"--prompt_file",
help="Relative path to a text file containing a prompt.",
prompt_file: Optional[str] = typer.Option(
None, "--prompt_file", help="Relative path to a text file containing a prompt."
),
entrypoint_prompt_file: str = typer.Option(
"",
entrypoint_prompt_file: Optional[str] = typer.Option(
None,
"--entrypoint_prompt",
help="Relative path to a text file containing a file that specifies requirements for you entrypoint.",
help="Relative path to a text file containing a file that specifies requirements for your entrypoint.",
),
image_directory: str = typer.Option(
"",
"--image_directory",
help="Relative path to a folder containing images.",
image_directory: Optional[str] = typer.Option(
None, "--image_directory", help="Relative path to a folder containing images."
),
use_cache: bool = typer.Option(
False,
use_cache: Optional[bool] = typer.Option(
None,
"--use_cache",
help="Speeds up computations and saves tokens when running the same prompt multiple times by caching the LLM response.",
),
Expand All @@ -366,7 +360,7 @@ def main(
no_execution: bool = typer.Option(
False,
"--no_execution",
help="Run setup but to not call LLM or write any code. For testing purposes.",
help="Run setup but do not call LLM or write any code. For testing purposes.",
),
sysinfo: bool = typer.Option(
False,
Expand Down Expand Up @@ -428,6 +422,63 @@ def main(
None
"""

# ask if the user wants to change the configuration
print(
"The configuration file(config.toml) is located in the root directory. You can edit it with your preferred "
"text editor."
)
# todo: interface to edit the configuration

# read the configuration file from the root directory
config = Config()
config_dict = config.from_toml(Path(os.getcwd()) / "config.toml").to_dict()

# todo: apply configuration here

# Loading the configuration from the config_dict

model = model or config_dict["model"]["model_name"]
temperature = (
temperature if temperature is not None else config_dict["model"]["temperature"]
)
azure_endpoint = azure_endpoint or config_dict["model"]["azure_endpoint"]

# Improve mode configuration
improve_mode = (
improve_mode
if improve_mode is not None
else config_dict["improve"]["is_file_selection"]
)
lite_mode = (
lite_mode if lite_mode is not None else config_dict["improve"]["is_linting"]
)

# Self-healing mechanism configuration
self_heal_mode = (
self_heal_mode
if self_heal_mode is not None
else config_dict["self_healing"]["retry_attempts"]
)

# Git filter configuration
config_dict["git_filter"]["file_extensions"] # Assuming this is needed somewhere

# API keys
config_dict["API"]["OPENAI_API_KEY"]
config_dict["API"]["ANTHROPIC_API_KEY"]

# Default values for optional parameters
clarify_mode = clarify_mode or False
use_custom_preprompts = use_custom_preprompts or False
llm_via_clipboard = llm_via_clipboard or False
verbose = verbose or False
debug = debug or False
prompt_file = prompt_file or "prompt"
entrypoint_prompt_file = entrypoint_prompt_file or ""
image_directory = image_directory or ""
use_cache = use_cache or False
no_execution = no_execution or False

if debug:
import pdb

Expand Down Expand Up @@ -517,9 +568,10 @@ def main(
files_dict_before, is_linting = FileSelector(project_path).ask_for_files(
skip_file_selection=skip_file_selection
)
files_dict_before = FileSelector(project_path).ask_for_files()

# lint the code
if is_linting:
if config_dict["improve"]["is_linting"]:
files_dict_before = files.linting(files_dict_before)

files_dict = handle_improve_mode(
Expand Down
Loading
Loading