diff --git a/aider/coders/mixture_of_architects_coder.py b/aider/coders/mixture_of_architects_coder.py
index 717efa359a5..f88202ac940 100644
--- a/aider/coders/mixture_of_architects_coder.py
+++ b/aider/coders/mixture_of_architects_coder.py
@@ -43,13 +43,23 @@ def get_architect_prompt(self, architect):
def get_architect_response(self, architect, current_user_message):
"""Get response from individual architect with proper prompting"""
try:
- # Build architect-specific conversation history
- full_context = f"You are architect {architect.name.upper()}.\n\n"
+ # Create and configure AskCoder
+ ask_coder = AskCoder.create(
+ main_model=architect.model,
+ io=self.io,
+ fnames=list(self.abs_fnames),
+ read_only_fnames=list(self.abs_read_only_fnames),
+ repo=self.repo,
+ map_tokens=self.repo_map.max_map_tokens if self.repo_map else 0,
+ summarize_from_coder=False,
+ stream=True,
+ )
+ ask_coder.auto_commits = self.auto_commits
+ ask_coder.gpt_prompts = MixturePrompts()
# Group messages by conversation round
rounds = []
current_round = []
-
for msg in self.discussion_messages:
if msg["role"] == "user":
if current_round:
@@ -57,65 +67,66 @@ def get_architect_response(self, architect, current_user_message):
current_round = [msg]
else:
current_round.append(msg)
-
if current_round:
rounds.append(current_round)
- # Build context from rounds
+ # Build the conversation messages
for round_msgs in rounds:
user_msg = next(msg for msg in round_msgs if msg["role"] == "user")
- full_context += "\n"
- full_context += user_msg["content"]
- full_context += "\n\n\n"
- # Add architects' responses/proposals
+ # Combine user message with other architects' proposals
+ user_content = "\n"
+ user_content += user_msg["content"]
+ user_content += "\n\n\n"
+
+ # Add other architects' proposals from this round
+ for msg in round_msgs:
+ if (
+ msg["role"] == "assistant"
+ and msg["name"] != architect.name.upper()
+ ):
+ content = msg["content"]
+                        proposal_match = re.search(
+                            r"<proposal>(.*?)</proposal>", content, re.DOTALL
+                        )
+ if proposal_match:
+ proposal_content = proposal_match.group(1).strip()
+ user_content += f"\n"
+ user_content += proposal_content
+ user_content += "\n\n\n"
+
+ ask_coder.cur_messages.append({"role": "user", "content": user_content})
+
+ # Add this architect's own response if they had one
for msg in round_msgs:
- if msg["role"] == "assistant":
- if msg["name"] == architect.name.upper():
- # Include full response for the current architect
- full_context += f"\n"
- full_context += msg["content"]
- full_context += "\n\n\n"
- else:
- # Only include proposal content from other architects
- content = msg["content"]
-                        proposal_match = re.search(
-                            r"<proposal>(.*?)</proposal>", content, re.DOTALL
-                        )
- if proposal_match:
- proposal_content = proposal_match.group(1).strip()
- full_context += f"\n"
- full_context += proposal_content
- full_context += "\n\n\n"
-
- # Only add current message if it's not already the last user message
- if not rounds or rounds[-1][0]["content"] != current_user_message:
- full_context += "\n"
- full_context += current_user_message
- full_context += "\n\n"
+ if (
+ msg["role"] == "assistant"
+ and msg["name"] == architect.name.upper()
+ ):
+ ask_coder.cur_messages.append(
+ {"role": "assistant", "content": msg["content"]}
+ )
# Debug output if verbose
if self.verbose:
- self.io.tool_output(f"\nDebug: Context being sent to {architect.name}:")
- self.io.tool_output("-" * 40)
- self.io.tool_output(full_context)
- self.io.tool_output("-" * 40 + "\n")
- # Create and configure AskCoder
- ask_coder = AskCoder.create(
- main_model=architect.model,
- io=self.io,
- fnames=list(self.abs_fnames),
- read_only_fnames=list(self.abs_read_only_fnames),
- repo=self.repo,
- map_tokens=self.repo_map.max_map_tokens if self.repo_map else 0,
- summarize_from_coder=False,
- stream=True,
- )
- ask_coder.auto_commits = self.auto_commits
- ask_coder.gpt_prompts = MixturePrompts()
-
- response = ask_coder.run(with_message=full_context, preproc=False)
+ self.io.rule()
+ self.io.tool_output(
+ f"\nDebug: Messages being sent to {architect.name}:", bold=True
+ )
+ self.io.tool_output("-" * 40)
+ for msg in ask_coder.cur_messages:
+ self.io.tool_output(f"{msg['role'].upper()}:")
+ self.io.tool_output(msg["content"])
+ self.io.tool_output("-" * 40)
+
+ # Pass the current message with XML tags as with_message
+ formatted_message = f"""
+            You are architect {architect.name}
+
+ {current_user_message}
+ """
+ response = ask_coder.run(with_message=formatted_message, preproc=False)
if not response.strip():
self.io.tool_warning(f"Warning: Empty response from {architect.name}")
@@ -140,13 +151,15 @@ def run_discussion_round(self, user_message):
return
# Debug: Show which architects are active
+ self.io.rule()
self.io.tool_output(
f"Active architects: {[arch.name for arch in active_architects]}"
)
# Process architects sequentially instead of concurrently
for arch in active_architects:
- self.io.tool_output(f"Waiting for {arch.name}'s response...")
+ self.io.tool_output(f"Waiting for {arch.name}'s response...", bold=True)
+ self.io.rule()
try:
arch, response = self.get_architect_response(arch, user_message)
@@ -173,9 +186,9 @@ def run_discussion_round(self, user_message):
)
# Show architect's proposal immediately
- if arch.last_response:
+ if self.verbose and arch.last_response:
self.io.rule()
- self.io.tool_output(f"{arch.name.upper()}'s proposal:", bold=True)
+ self.io.tool_output(f"{arch.name.upper()}'s Response:", bold=True)
self.io.tool_output(f"\n{arch.last_response}\n")
# Add final divider