Showing 32 changed files with 1,583 additions and 1 deletion.
@@ -0,0 +1,62 @@

```yaml
name: Release

on:
  push:
    branches:
      - main

permissions:
  contents: write
  pages: write
  id-token: write

jobs:
  pages:
    runs-on: ubuntu-latest
    environment:
      name: Development

    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

      - name: Set up Node.js
        uses: actions/setup-node@v3
        with:
          node-version: 21

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: 3.x

      - name: Install dependencies
        run: |
          sudo apt update
          sudo apt install -y python3-pip python3-venv
          npm install @semantic-release/[email protected]
          npm install @semantic-release/[email protected]
          npm install @semantic-release/[email protected]
          npm install @semantic-release/[email protected]

      - name: Make release
        env:
          GH_TOKEN: ${{ secrets.GH_TOKEN }}
        run: npx semantic-release --debug

      - name: Create documentation
        run: |
          python3 -m venv venv
          source venv/bin/activate
          pip install .
          cd docs && make html && cd ..
          mkdir public
          mv docs/build/html/* public/

      - name: Upload artifact
        uses: actions/upload-pages-artifact@v3
        with:
          path: ./public

      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4
```
@@ -0,0 +1,3 @@

```
*~
ai_agent.egg-info
__pycache__
```
@@ -0,0 +1,19 @@

```bash
#!/bin/bash

# This script has to be called from the root of the repository

if [[ $# -eq 0 ]] ; then
    echo "Usage: $0 <VERSION_NUMBER>"
    exit 1
fi

VERSION=${1}

# ## AI Agent Microservice a.b.c
sed -i "s/## AI Agent Microservice [[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+/## AI Agent Microservice ${VERSION}/g" README.md
# version="a.b.c"
sed -i "s/version=\"[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+\"/version=\"${VERSION}\"/g" setup.py
# release = 'a.b.c'
sed -i "s/release = '[[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+'/release = '${VERSION}'/g" docs/source/conf.py
# version: a.b.c
sed -i "s/version: [[:digit:]]\+\.[[:digit:]]\+\.[[:digit:]]\+/version: ${VERSION}/g" api/openapi.yaml
```
@@ -0,0 +1,17 @@

```json
{
  "plugins": [
    "@semantic-release/commit-analyzer",
    "@semantic-release/release-notes-generator",
    ["@semantic-release/exec", {
      "prepareCmd": "./.release/update_version_number.sh ${nextRelease.version}"
    }],
    ["@semantic-release/changelog", {
      "changelogFile": "CHANGELOG.md"
    }],
    ["@semantic-release/git", {
      "assets": ["CHANGELOG.md", "README.md", "api/openapi.yaml", "docs/source/conf.py", "setup.py", "docs/source/index.rst"],
      "message": "Bump version number to ${nextRelease.version} [skip ci]\n\n${nextRelease.notes}"
    }],
    "@semantic-release/github"
  ]
}
```
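Since this configuration does not override the commit-analyzer preset, semantic-release's default conventional-commit rules should apply; as a rough illustration (the commit messages are hypothetical):

```bash
# Hypothetical commit messages and the release type they would
# trigger under the default commit-analyzer rules
git commit -m "fix: use 127.0.0.1 when no ip is mapped"   # patch release
git commit -m "feat: support PUT requests in dispatcher"  # minor release
```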
@@ -0,0 +1 @@

```
include aiagent/templates/*.html
```
@@ -1 +1,147 @@

# ai-agent-service

## AI Agent Microservice 1.0.0

The AI Agent Microservice allows natural communication between the user and other microservices. The service uses the Hugging Face LLM Trelis/Llama-2-7b-chat-hf-function-calling-v3 to convert text commands into API calls: it processes the LLM result and issues the corresponding API request.

The service requires a prompt describing the available functions and the behavior of the LLM, and a configuration file that defines the API calls and maps them to the prompt functions. See below for more details.

### Prompt format

The prompt should follow the model's prompt format; check the model documentation at
https://huggingface.co/Trelis/Llama-2-7b-chat-hf-function-calling-v3
### API configuration

The API configuration is a JSON file with the following structure:

```json
{
    "function_name": {
        "ip": "api_ip",
        "port": "api_port",
        "path": "api_request_path",
        "method": "request_method",
        "properties": {
            "prop1": "prop1 value",
            "prop2": "prop2 value"
        },
        "body": {
            "arg1": "value",
            "arg2": "value"
        }
    }
}
```
The JSON should contain one function object per function described in the prompt, since it is used to map the LLM's function-calling reply to the corresponding microservice API call. The function_name must therefore match one of the functions defined in the prompt.

The arguments port, path, and method are required. port is the port of the microservice to call, path is the route of the specific API request, and method is the HTTP method for the request: GET, POST, or PUT.

The ip argument is optional; it defines the IP address of the microservice to call. If not defined, localhost (127.0.0.1) is used.

The properties object defines the query parameters of the API request. It is optional; add it only if the API request takes parameters. The value of each property is taken from the LLM reply, so the string in the value should match the argument name defined in the corresponding prompt function.

The body object defines the API request content. It is optional; add it only if the API request needs a body. The value of each argument in the body is taken from the LLM reply, so the string in the value should match the argument name defined in the corresponding prompt function.

Check the following example:
```json
{
    "search_object": {
        "ip": "192.168.86.25",
        "port": 30080,
        "path": "genai/prompt",
        "method": "GET",
        "properties": {
            "objects": "input",
            "thresholds": 0.2
        }
    },
    "move_camera": {
        "port": 1234,
        "path": "position",
        "method": "PUT",
        "body": {
            "pan": "pan_angle",
            "tilt": "tilt_angle"
        }
    }
}
```
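To make the mapping concrete: if the LLM replied with the hypothetical function call `{"name": "move_camera", "arguments": {"pan_angle": 30, "tilt_angle": 10}}`, the service would issue the equivalent of the following request, with body keys taken from the mapping and values from the LLM arguments:

```bash
# Equivalent request for the hypothetical move_camera reply above;
# no ip is mapped for move_camera, so localhost is used
curl -X PUT http://127.0.0.1:1234/position \
     -H "Content-Type: application/json" \
     -d '{"pan": 30, "tilt": 10}'
```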
### Running the service

The project is configured (via setup.py) to install the service under the name __ai-agent__. To install it, run:

```bash
pip install .
```
Then the service is available with the following options:

```bash
usage: ai-agent [-h] [--port PORT] --system_prompt SYSTEM_PROMPT --api_map API_MAP

options:
  -h, --help            show this help message and exit
  --port PORT           Port for server
  --system_prompt SYSTEM_PROMPT
                        String with system prompt or path to a txt file with the prompt
  --api_map API_MAP     Path to a JSON file with API mapping configuration
```
Notice that --system_prompt and --api_map are required, so to run the service use the following command:

```bash
ai-agent --system_prompt SYSTEM_PROMPT --api_map API_MAP
```

This starts the service at address 127.0.0.1 on port 5010. If you want to use a different port, use the __--port__ option.
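For example, a run with local configuration files might look like this (the file names are illustrative):

```bash
# Hypothetical invocation: prompt and API map from the current
# directory, serving on port 8080 instead of the default 5010
ai-agent --system_prompt ./prompt.txt --api_map ./api_mapping.json --port 8080
```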
## AI Agent Docker

### Build the container

You can build the ai-agent microservice container using the Dockerfile in the docker directory. This includes a base TensorRT image and the dependencies to run the ai-agent microservice application.

First, prepare the context directory for this build: create a directory that includes this repository and the rrms-utils project. The Dockerfile will look for both packages in the context directory and copy them into the container.
```bash
ai-agent-context/
├── ai-agent
└── rrms-utils
```
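One way to lay out that context is sketched below; the source paths are placeholders, so adjust them to wherever your copies of the two projects live:

```bash
# Assemble the build context (source paths are placeholders)
mkdir ai-agent-context
cp -r /path/to/ai-agent ai-agent-context/ai-agent
cp -r /path/to/rrms-utils ai-agent-context/rrms-utils
```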
Then build the container image with the following command:

```bash
DOCKER_BUILDKIT=0 docker build --network=host --tag ridgerun/ai-agent-service --file ai-agent-context/ai-agent/docker/Dockerfile ai-agent-context/
```

Change ai-agent-context/ to the path of your context, and the tag to the name you want to give your image.
### Launch the container

The container can be launched by running the following command:

```bash
docker run --runtime nvidia -it --network host --volume /home/nvidia/config:/ai-agent-config --name ai-agent ridgerun/ai-agent-service:latest ai-agent --system_prompt ai-agent-config/prompt.txt --api_map ai-agent-config/api_mapping.json
```

Here we are creating a container called ai-agent. Notice that we mount the directory /home/nvidia/config into /ai-agent-config; it holds the prompt and API configuration files, so change it to point to your configuration directory or wherever you keep the required configs. We also define the ai-agent microservice application as the entry point, with its corresponding parameters.
@@ -0,0 +1,106 @@

```python
"""
Copyright (C) 2024 RidgeRun, LLC (http://www.ridgerun.com)
All Rights Reserved.
The contents of this software are proprietary and confidential to RidgeRun,
LLC. No part of this program may be photocopied, reproduced or translated
into another programming language without prior written consent of
RidgeRun, LLC. The user is free to modify the source code after obtaining
a software license from RidgeRun. All source code changes must be provided
back to RidgeRun without any encumbrance.
"""
import json
import logging

import requests
from rrmsutils.models.apiresponse import ApiResponse

logger = logging.getLogger("ai-agent")


class ApiDispatcher:
    """
    Call the API corresponding to a function-calling request
    """

    def __init__(self, mapping_file):
        with open(mapping_file, encoding="utf-8") as json_map:
            self._api_mapping = json.load(json_map)

    def parse_request(self, request):
        """
        Parse a request and map it to an API call
        """

        # Get request parameters
        request_json = json.loads(request)
        function = request_json['name']
        arguments = request_json['arguments']

        # Get mapping parameters; fall back to localhost when no ip is mapped
        mapping = self._api_mapping[function]
        if "ip" in mapping:
            ip = mapping['ip']
        else:
            ip = "127.0.0.1"

        port = mapping['port']
        path = mapping['path']
        method = mapping['method']

        # Initialize URI
        uri = 'http://' + ip + ':' + str(port) + '/' + path

        # Parse properties into a query string
        if "properties" in mapping:
            uri_arguments = ''
            for prop in mapping['properties']:
                if not uri_arguments:
                    uri_arguments = '?'
                else:
                    uri_arguments += '&'
                uri_arguments += prop + '='

                key = str(mapping['properties'][prop])

                # Use the LLM-provided argument when present, otherwise
                # keep the mapping value itself as a literal default
                if key in arguments:
                    uri_arguments += str(arguments[key])
                else:
                    uri_arguments += key

            uri += uri_arguments

        # Parse body, replacing mapped names with the LLM arguments
        json_body = None
        if "body" in mapping:
            json_body = mapping['body'].copy()
            for prop in mapping['body']:
                key = str(mapping['body'][prop])
                if key in arguments:
                    json_body[prop] = arguments[key]

        return method, uri, json_body

    def process_request(self, request):
        """
        Process a request and call the corresponding API
        """

        try:
            method, uri, json_body = self.parse_request(request)
        except Exception as e:
            logger.warning(f"Failed to parse request to api. {repr(e)}")
            response = ApiResponse(
                code=1, message="Missing mapping parameter. " + repr(e))
            return response.model_dump_json(), 200

        # Send the API request
        logger.info(f"Sending API request uri: {uri}, body: {json_body}")

        try:
            r = requests.request(method, uri, json=json_body)
        except Exception as e:
            response = ApiResponse(code=1, message=repr(e))
            return response.model_dump_json(), 400

        return r.text, r.status_code
```