Skip to content

Commit 2f71c00

Browse files
authored
Merge pull request #27 from nforro/beeai
BeeAI workflow
2 parents 306eea3 + 3d8960b commit 2f71c00

File tree

14 files changed

+1206
-0
lines changed

14 files changed

+1206
-0
lines changed

beeai/Containerfile

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
FROM fedora:42

# System dependencies: Python runtime, packaging toolchain (rpmlint,
# rpmdevtools, rpmbuild, spectool) and VCS/forge clients (git, glab).
# centpkg is added because the agent prompts rely on `centpkg sources`
# and `centpkg prep` when preparing dist-git updates.
RUN dnf -y install \
    python3 \
    python3-pip \
    python3-redis \
    glab \
    git \
    curl \
    rpmlint \
    rpmdevtools \
    rpmbuild \
    spectool \
    centpkg \
    && dnf clean all

COPY beeai-gemini.patch /tmp

# Install BeeAI Framework and Phoenix instrumentation, then apply the
# local Gemini compatibility patch on top of the installed framework.
# NOTE(review): the site-packages path is pinned to Python 3.13 — confirm
# it still matches whenever the fedora base image bumps its Python.
RUN pip3 install --no-cache-dir \
    beeai-framework[mcp,duckduckgo] \
    openinference-instrumentation-beeai \
    arize-phoenix-otel \
    && cd /usr/local/lib/python3.13/site-packages \
    && patch -p2 -i /tmp/beeai-gemini.patch

# Create an unprivileged user for running the agents
RUN useradd -m -G wheel beeai

# Agent code is mounted/copied into this directory at runtime
RUN mkdir -p /home/beeai/agents
RUN chown -R beeai:beeai /home/beeai

USER beeai
WORKDIR /home/beeai

CMD ["/bin/bash"]

beeai/Makefile

Lines changed: 94 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,94 @@
IMAGE_NAME ?= beeai-agent
COMPOSE_FILE ?= compose.yaml

# Detect the compose implementation once at parse time (podman preferred).
# The previous `COMPOSE ?= $(shell ...)` form is recursive-flavour, so the
# detection command would be re-executed on every expansion; the origin
# guard below keeps command-line/environment overrides working.
ifeq ($(origin COMPOSE), undefined)
COMPOSE := $(shell command -v podman >/dev/null 2>&1 && echo "podman compose" || echo "docker-compose")
endif

# Shorthand for every compose invocation below.
compose_cmd = $(COMPOSE) -f $(COMPOSE_FILE)

# Agents that have queue-driven and standalone entry points.
AGENTS := triage rebase backport

.PHONY: build
build:  ## Build all images
	$(compose_cmd) build

# --- Service lifecycle helpers --------------------------------------------

.PHONY: start-mcp stop-mcp start-valkey stop-valkey start-phoenix stop-phoenix
start-mcp:
	$(compose_cmd) up -d mcp-atlassian
stop-mcp:
	$(compose_cmd) stop mcp-atlassian
start-valkey:
	$(compose_cmd) up -d valkey
stop-valkey:
	$(compose_cmd) stop valkey
start-phoenix:
	$(compose_cmd) up -d phoenix
stop-phoenix:
	$(compose_cmd) stop phoenix

.PHONY: run-beeai-bash
run-beeai-bash:  ## Interactive shell inside the agent container
	$(compose_cmd) run --rm beeai-agent /bin/bash

# --- Queue-driven agents ---------------------------------------------------
# Static pattern rules (not implicit ones) are used on purpose: implicit
# pattern rules are never matched against .PHONY targets.

RUN_AGENT_TARGETS := $(AGENTS:%=run-%-agent)
.PHONY: $(RUN_AGENT_TARGETS)
$(RUN_AGENT_TARGETS): run-%-agent:
	$(compose_cmd) run --rm beeai-agent /usr/bin/python agents/$*_agent.py

RUN_AGENT_DETACHED_TARGETS := $(AGENTS:%=run-%-agent-detached)
.PHONY: $(RUN_AGENT_DETACHED_TARGETS)
$(RUN_AGENT_DETACHED_TARGETS): run-%-agent-detached:
	$(compose_cmd) run --rm --detach beeai-agent /usr/bin/python agents/$*_agent.py

# --- Standalone (one-shot) agents ------------------------------------------
# These take their input via environment variables, so each one is explicit.

.PHONY: run-triage-agent-standalone
run-triage-agent-standalone:
	$(compose_cmd) run --rm \
	-e JIRA_ISSUE=$(JIRA_ISSUE) \
	beeai-agent /usr/bin/python agents/triage_agent.py

.PHONY: run-rebase-agent-standalone
run-rebase-agent-standalone:
	$(compose_cmd) run --rm \
	-e PACKAGE=$(PACKAGE) \
	-e VERSION=$(VERSION) \
	-e JIRA_ISSUE=$(JIRA_ISSUE) \
	-e BRANCH=$(BRANCH) \
	beeai-agent /usr/bin/python agents/rebase_agent.py

.PHONY: run-backport-agent-standalone
run-backport-agent-standalone:
	$(compose_cmd) run --rm \
	-e PACKAGE=$(PACKAGE) \
	-e UPSTREAM_FIX=$(UPSTREAM_FIX) \
	-e JIRA_ISSUE=$(JIRA_ISSUE) \
	-e BRANCH=$(BRANCH) \
	beeai-agent /usr/bin/python agents/backport_agent.py

.PHONY: clean
clean:  ## Stop everything and remove volumes
	$(compose_cmd) down --volumes

beeai/README.md

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,29 @@
1+
# BeeAI workflow
2+
3+
A set of AI agents implemented in the BeeAI Framework, interconnected via Redis.
4+
Every agent can run individually or pick up tasks from a Redis queue.
5+
6+
## Setup
7+
8+
Copy the `templates` directory to `.secrets` and fill in required information.
9+
10+
## Running the workflow
11+
12+
Start the agents using e.g. `make run-triage-agent` or `make run-triage-agent-detached`. This will automatically
13+
start all the related services. There is currently no mechanism to initiate the workflow other than manually
14+
pushing a task to the `triage_queue` Redis list. The easiest way to do that is with:
15+
16+
```bash
17+
podman compose exec valkey -- redis-cli lpush triage_queue '{"metadata":{"issue":"RHEL-12345"}}'
18+
```
19+
20+
## Running individual agents
21+
22+
You can run any agent individually with the appropriate make target, passing required input data
23+
via environment variables, e.g. `make JIRA_ISSUE=RHEL-12345 run-triage-agent-standalone`.
24+
The agent will run only once, print its output and exit.
25+
26+
## Observability
27+
28+
You can connect to http://localhost:6006/ to access the Phoenix web interface and trace agents
29+
(it works with individual runs too).

beeai/agents/backport_agent.py

Lines changed: 189 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,189 @@
1+
import asyncio
2+
import os
3+
import sys
4+
import traceback
5+
from typing import Optional
6+
7+
from pydantic import BaseModel, Field
8+
9+
from beeai_framework.agents.experimental.requirements.conditional import (
10+
ConditionalRequirement,
11+
)
12+
from beeai_framework.backend import ChatModel
13+
from beeai_framework.errors import FrameworkError
14+
from beeai_framework.memory import UnconstrainedMemory
15+
from beeai_framework.middleware.trajectory import GlobalTrajectoryMiddleware
16+
from beeai_framework.tools import Tool
17+
from beeai_framework.tools.search.duckduckgo import DuckDuckGoSearchTool
18+
from beeai_framework.tools.think import ThinkTool
19+
20+
from base_agent import BaseAgent, TInputSchema, TOutputSchema
21+
from observability import setup_observability
22+
from tools import ShellCommandTool
23+
from triage_agent import BackportData, ErrorData
24+
from utils import redis_client
25+
26+
27+
class InputSchema(BaseModel):
    """Input for the backport agent: what to backport, where, and as whom."""

    package: str = Field(description="Package to update")
    upstream_fix: str = Field(description="Link to an upstream fix for the issue")
    jira_issue: str = Field(description="Jira issue to reference as resolved")
    dist_git_branch: str = Field(description="Git branch in dist-git to be updated")
    # GitLab account used for forking and opening merge requests; the
    # GITLAB_USER environment variable is read once, at import time.
    gitlab_user: str = Field(
        default=os.getenv("GITLAB_USER", "rhel-packaging-agent"),
        description="Name of the GitLab user",
    )
    git_url: str = Field(
        default="https://gitlab.com/redhat/centos-stream/rpms",
        description="URL of the git repository",
    )
    # Identity used for the git commits created by the agent.
    git_user: str = Field(default="RHEL Packaging Agent", description="Name of the git user")
    git_email: str = Field(
        default="[email protected]", description="E-mail address of the git user"
    )
44+
45+
46+
class OutputSchema(BaseModel):
    """Result reported by the backport agent."""

    success: bool = Field(description="Whether the backport was successfully completed")
    status: str = Field(description="Backport status")
    # Only meaningful when a merge request was actually opened.
    mr_url: Optional[str] = Field(description="URL to the opened merge request")
    # Only meaningful on failure.
    error: Optional[str] = Field(description="Specific details about an error")
51+
52+
53+
class BackportAgent(BaseAgent):
    """Requirement agent that backports an upstream fix into a CentOS Stream package.

    Uses shell, web-search and think tools; the prompt template below drives
    the whole fork → patch → verify → commit → merge-request workflow.
    """

    def __init__(self) -> None:
        # Fail fast with an actionable message instead of passing None to
        # ChatModel.from_name() and crashing deep inside the framework.
        chat_model = os.getenv("CHAT_MODEL")
        if not chat_model:
            raise ValueError("CHAT_MODEL environment variable must be set")
        super().__init__(
            llm=ChatModel.from_name(chat_model),
            tools=[ThinkTool(), ShellCommandTool(), DuckDuckGoSearchTool()],
            memory=UnconstrainedMemory(),
            requirements=[
                # Force a reflection step after every tool call and forbid
                # two consecutive ThinkTool invocations.
                ConditionalRequirement(ThinkTool, force_after=Tool, consecutive_allowed=False),
            ],
            middlewares=[GlobalTrajectoryMiddleware()],
        )

    @property
    def input_schema(self) -> type[TInputSchema]:
        return InputSchema

    @property
    def output_schema(self) -> type[TOutputSchema]:
        return OutputSchema

    @property
    def prompt(self) -> str:
        # Mustache-style placeholders are rendered by BaseAgent._render_prompt().
        return """
You are an agent for backporting a fix for a CentOS Stream package. You will prepare the content
of the update and then create a commit with the changes. Create a temporary directory and always work
inside it. Follow exactly these steps:

1. Find the location of the {{ package }} package at {{ git_url }}. Always use the {{ dist_git_branch }} branch.

2. Check if the package {{ package }} already has the fix {{ jira_issue }} applied.

3. Create a local Git repository by following these steps:
* Check if the fork already exists for {{ gitlab_user }} as {{ gitlab_user }}/{{ package }} and if not,
create a fork of the {{ package }} package using the glab tool.
* Clone the fork using git and HTTPS into the temp directory.
* Run command `centpkg sources` in the cloned repository which downloads all sources defined in the RPM specfile.
* Create a new Git branch named `automated-package-update-{{ jira_issue }}`.

4. Update the {{ package }} with the fix:
* Updating the 'Release' field in the .spec file as needed (or corresponding macros), following packaging
documentation.
* Make sure the format of the .spec file remains the same.
* Fetch the upstream fix {{ upstream_fix }} locally and store it in the git repo as "{{ jira_issue }}.patch".
* Add a new "Patch:" entry in the spec file for patch "{{ jira_issue }}.patch".
* Verify that the patch is being applied in the "%prep" section.
* Creating a changelog entry, referencing the Jira issue as "Resolves: <jira_issue>" for the issue {{ jira_issue }}.
The changelog entry has to use the current date.
* IMPORTANT: Only performing changes relevant to the backport update: Do not rename variables,
comment out existing lines, or alter if-else branches in the .spec file.

5. Verify and adjust the changes:
* Use `rpmlint` to validate your .spec file changes and fix any new errors it identifies.
* Generate the SRPM using `rpmbuild -bs` (ensure your .spec file and source files are correctly copied
to the build environment as required by the command).
* Verify the newly added patch applies cleanly using the command `centpkg prep`.

6. Commit the changes:
* The title of the Git commit should be in the format "[DO NOT MERGE: AI EXPERIMENTS] backport {{ jira_issue }}"
* Include the reference to Jira as "Resolves: <jira_issue>" for the issue {{ jira_issue }}.
* Commit the RPM spec file change and the newly added patch file.
* Push the commit to the fork.

7. Open a merge request:
* Authenticate using `glab`
* Open a merge request against the upstream repository of the {{ package }} in {{ git_url }}
with previously created commit.
"""
120+
121+
122+
async def main() -> None:
    """Entry point for the backport agent.

    If PACKAGE, UPSTREAM_FIX, JIRA_ISSUE and BRANCH are all set in the
    environment, run exactly once (standalone mode), print the result and
    return. Otherwise block on the `backport_queue` Redis list and process
    tasks until terminated, re-queueing failures up to MAX_RETRIES times.
    """
    setup_observability(os.getenv("COLLECTOR_ENDPOINT"))
    agent = BackportAgent()

    # Standalone mode: all inputs supplied via environment variables.
    if (
        (package := os.getenv("PACKAGE"))
        and (upstream_fix := os.getenv("UPSTREAM_FIX"))
        and (jira_issue := os.getenv("JIRA_ISSUE"))
        and (branch := os.getenv("BRANCH"))
    ):
        # Named schema_input rather than `input` to avoid shadowing the builtin.
        schema_input = InputSchema(
            package=package,
            upstream_fix=upstream_fix,
            jira_issue=jira_issue,
            dist_git_branch=branch,
        )
        output = await agent.run_with_schema(schema_input)
        print(output.model_dump_json(indent=4))
        return

    class Task(BaseModel):
        # Envelope for queue payloads.
        metadata: dict = Field(description="Task metadata")
        attempts: int = Field(default=0, description="Number of processing attempts")

    async with redis_client(os.getenv("REDIS_URL")) as redis:
        # getenv defaults must be strings; int() then parses it.
        max_retries = int(os.getenv("MAX_RETRIES", "3"))

        async def retry(task: Task, error: Optional[str]) -> None:
            # Re-queue the task until the retry budget is exhausted, then
            # record the failure on the error list for later inspection.
            # `error` may be None (OutputSchema.error is optional), so never
            # push a raw None into Redis.
            task.attempts += 1
            if task.attempts < max_retries:
                await redis.lpush("backport_queue", task.model_dump_json())
            else:
                await redis.lpush("error_list", error or "unknown error")

        while True:
            element = await redis.brpop("backport_queue", timeout=30)
            if element is None:
                # Timed out with an empty queue; poll again.
                continue
            _, payload = element
            task = Task.model_validate_json(payload)
            backport_data = BackportData.model_validate(task.metadata)
            schema_input = InputSchema(
                package=backport_data.package,
                upstream_fix=backport_data.patch_url,
                jira_issue=backport_data.jira_issue,
                dist_git_branch=backport_data.branch,
            )

            try:
                output = await agent.run_with_schema(schema_input)
            except Exception as e:
                error = "".join(traceback.format_exception(e))
                print(error, file=sys.stderr)
                await retry(
                    task,
                    ErrorData(details=error, jira_issue=schema_input.jira_issue).model_dump_json(),
                )
            else:
                if output.success:
                    await redis.lpush("completed_backport_list", output.model_dump_json())
                else:
                    await retry(task, output.error)
182+
183+
184+
if __name__ == "__main__":
185+
try:
186+
asyncio.run(main())
187+
except FrameworkError as e:
188+
traceback.print_exc()
189+
sys.exit(e.explain())

beeai/agents/base_agent.py

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
from abc import ABC, abstractmethod
2+
from typing import TypeVar
3+
4+
from pydantic import BaseModel
5+
6+
from beeai_framework.agents.experimental import RequirementAgent, RequirementAgentRunOutput
7+
from beeai_framework.template import PromptTemplate, PromptTemplateInput
8+
9+
10+
TInputSchema = TypeVar("TInputSchema", bound=BaseModel)
11+
TOutputSchema = TypeVar("TOutputSchema", bound=BaseModel)
12+
13+
14+
class BaseAgent(RequirementAgent, ABC):
    """Common base for schema-driven agents.

    Subclasses declare pydantic input/output schemas and a template prompt;
    `run_with_schema` renders the prompt from a validated input, runs the
    agent, and parses the model response back into the output schema.
    """

    @property
    @abstractmethod
    def input_schema(self) -> type[TInputSchema]: ...

    @property
    @abstractmethod
    def output_schema(self) -> type[TOutputSchema]: ...

    @property
    @abstractmethod
    def prompt(self) -> str: ...

    def _render_prompt(self, input: TInputSchema) -> str:
        # The template is validated against the input schema before the
        # placeholder values are substituted.
        return PromptTemplate(
            PromptTemplateInput(schema=self.input_schema, template=self.prompt)
        ).render(input)

    async def _run_with_schema(self, input: TInputSchema) -> TOutputSchema:
        response = await self.run(
            prompt=self._render_prompt(input),
            expected_output=self.output_schema,
        )
        # The framework returns the structured output as JSON text.
        return self.output_schema.model_validate_json(response.result.text)

    async def run_with_schema(self, input: TInputSchema) -> TOutputSchema:
        """Run the agent on a validated input and return a validated output."""
        return await self._run_with_schema(input)

0 commit comments

Comments
 (0)