diff --git a/astrbot/core/astr_agent_run_util.py b/astrbot/core/astr_agent_run_util.py index eca24699ae..85d49655e0 100644 --- a/astrbot/core/astr_agent_run_util.py +++ b/astrbot/core/astr_agent_run_util.py @@ -2,7 +2,8 @@ import re import time import traceback -from collections.abc import AsyncGenerator +from collections.abc import AsyncGenerator, Callable +from typing import Any from astrbot.core import logger from astrbot.core.agent.message import Message @@ -94,6 +95,8 @@ async def run_agent( show_tool_call_result: bool = False, stream_to_general: bool = False, show_reasoning: bool = False, + # 回调函数:每步完成后调用,参数为 (step_idx, resp_type, resp_data) + step_callback: Callable[[int, str, Any], None] | None = None, ) -> AsyncGenerator[MessageChain | None, None]: step_idx = 0 astr_event = agent_runner.run_context.context.event @@ -149,6 +152,10 @@ async def run_agent( ), ) + # 回调通知 + if step_callback: + step_callback(step_idx, "tool_call_result", resp.data) + if msg_chain.type == "tool_direct_result": # tool_direct_result 用于标记 llm tool 需要直接发送给用户的内容 await astr_event.send(msg_chain) @@ -181,6 +188,10 @@ async def run_agent( ) _record_tool_call_name(tool_info, tool_name_by_call_id) + # 回调通知 + if step_callback: + step_callback(step_idx, "tool_call", resp.data) + if astr_event.get_platform_name() == "webchat": await astr_event.send(resp.data["chain"]) elif show_tool_use: @@ -202,6 +213,11 @@ async def run_agent( if resp.type == "llm_result" else ResultContentType.GENERAL_RESULT ) + + # 回调通知 llm_result + if step_callback and resp.type == "llm_result": + step_callback(step_idx, "llm_result", resp.data) + astr_event.set_result( MessageEventResult( chain=resp.data["chain"].chain, @@ -223,6 +239,10 @@ async def run_agent( except asyncio.CancelledError: pass if agent_runner.done(): + # 回调通知完成 + if step_callback: + step_callback(step_idx, "done", None) + # send agent stats to webchat if astr_event.get_platform_name() == "webchat": await astr_event.send( diff --git 
a/astrbot/core/db/po.py b/astrbot/core/db/po.py index 451f054f62..d99b38a27f 100644 --- a/astrbot/core/db/po.py +++ b/astrbot/core/db/po.py @@ -132,6 +132,16 @@ class Persona(TimestampMixin, SQLModel, table=True): """所属文件夹ID,NULL 表示在根目录""" sort_order: int = Field(default=0) """排序顺序""" + personality_config: dict | None = Field(default=None, sa_type=JSON) + """高级人格配置:人格特质、表达风格、识别规则、心情标签等""" + chat_config: dict | None = Field(default=None, sa_type=JSON) + """高级人格配置:聊天频率、动态频率、消息长度等""" + robot_config: dict | None = Field(default=None, sa_type=JSON) + """高级人格配置:昵称、别名、平台等""" + llm_model_config: dict | None = Field(default=None, sa_type=JSON) + """高级人格配置:模型配置(功能模型、回复模型、思考模型)""" + is_advanced: bool | None = Field(default=False) + """是否为高级人格""" __table_args__ = ( UniqueConstraint( @@ -476,6 +486,16 @@ class Personality(TypedDict): """Skills 列表。None 表示使用所有 Skills,空列表表示不使用任何 Skills""" custom_error_message: str | None """可选的人格自定义报错回复信息。配置后将优先发送给最终用户。""" + personality_config: dict | None + """高级人格配置:人格特质、表达风格、识别规则、心情标签等""" + chat_config: dict | None + """高级人格配置:聊天频率、动态频率、消息长度等""" + robot_config: dict | None + """高级人格配置:昵称、别名、平台等""" + llm_model_config: dict | None + """高级人格配置:模型配置(功能模型、回复模型、思考模型)""" + is_advanced: bool + """是否为高级人格""" # cache _begin_dialogs_processed: list[dict] diff --git a/astrbot/core/db/sqlite.py b/astrbot/core/db/sqlite.py index c8e50909d5..5a6cf6fe3e 100644 --- a/astrbot/core/db/sqlite.py +++ b/astrbot/core/db/sqlite.py @@ -47,6 +47,12 @@ def __init__(self, db_path: str) -> None: async def initialize(self) -> None: """Initialize the database by creating tables if they do not exist.""" + # 延迟导入 MindSim 记忆模型,避免循环导入 + from astrbot.core.mind_sim.memory.models import ( # noqa: F401 + MindSimChatMemory, + MindSimPersonMemory, + ) + async with self.engine.begin() as conn: await conn.run_sync(SQLModel.metadata.create_all) await conn.execute(text("PRAGMA journal_mode=WAL")) @@ -59,6 +65,7 @@ async def initialize(self) -> None: await 
self._ensure_persona_folder_columns(conn) await self._ensure_persona_skills_column(conn) await self._ensure_persona_custom_error_message_column(conn) + await self._ensure_persona_advanced_columns(conn) await conn.commit() async def _ensure_persona_folder_columns(self, conn) -> None: @@ -103,6 +110,44 @@ async def _ensure_persona_custom_error_message_column(self, conn) -> None: text("ALTER TABLE personas ADD COLUMN custom_error_message TEXT") ) + async def _ensure_persona_advanced_columns(self, conn) -> None: + """确保 personas 表有高级人格配置列(前向兼容)。 + + 新增列: + - personality_config: JSON - 人格特质、表达风格、识别规则、心情标签等 + - chat_config: JSON - 聊天频率、动态频率、消息长度等 + - robot_config: JSON - 昵称、别名、平台等 + - llm_model_config: JSON - 模型配置(功能模型、回复模型、思考模型) + - is_advanced: INTEGER - 是否为高级人格 + """ + result = await conn.execute(text("PRAGMA table_info(personas)")) + columns = {row[1] for row in result.fetchall()} + + if "personality_config" not in columns: + await conn.execute( + text( + "ALTER TABLE personas ADD COLUMN personality_config JSON DEFAULT NULL" + ) + ) + if "chat_config" not in columns: + await conn.execute( + text("ALTER TABLE personas ADD COLUMN chat_config JSON DEFAULT NULL") + ) + if "robot_config" not in columns: + await conn.execute( + text("ALTER TABLE personas ADD COLUMN robot_config JSON DEFAULT NULL") + ) + if "llm_model_config" not in columns: + await conn.execute( + text( + "ALTER TABLE personas ADD COLUMN llm_model_config JSON DEFAULT NULL" + ) + ) + if "is_advanced" not in columns: + await conn.execute( + text("ALTER TABLE personas ADD COLUMN is_advanced INTEGER DEFAULT 0") + ) + # ==== # Platform Statistics # ==== @@ -689,6 +734,11 @@ async def insert_persona( custom_error_message=None, folder_id=None, sort_order=0, + personality_config=None, + chat_config=None, + robot_config=None, + llm_model_config=None, + is_advanced=False, ): """Insert a new persona record.""" async with self.get_db() as session: @@ -703,6 +753,11 @@ async def insert_persona( 
custom_error_message=custom_error_message, folder_id=folder_id, sort_order=sort_order, + personality_config=personality_config, + chat_config=chat_config, + robot_config=robot_config, + llm_model_config=llm_model_config, + is_advanced=is_advanced, ) session.add(new_persona) await session.flush() @@ -733,6 +788,11 @@ async def update_persona( tools=NOT_GIVEN, skills=NOT_GIVEN, custom_error_message=NOT_GIVEN, + personality_config=NOT_GIVEN, + chat_config=NOT_GIVEN, + robot_config=NOT_GIVEN, + llm_model_config=NOT_GIVEN, + is_advanced=NOT_GIVEN, ): """Update a persona's system prompt or begin dialogs.""" async with self.get_db() as session: @@ -750,6 +810,16 @@ async def update_persona( values["skills"] = skills if custom_error_message is not NOT_GIVEN: values["custom_error_message"] = custom_error_message + if personality_config is not NOT_GIVEN: + values["personality_config"] = personality_config + if chat_config is not NOT_GIVEN: + values["chat_config"] = chat_config + if robot_config is not NOT_GIVEN: + values["robot_config"] = robot_config + if llm_model_config is not NOT_GIVEN: + values["llm_model_config"] = llm_model_config + if is_advanced is not NOT_GIVEN: + values["is_advanced"] = is_advanced if not values: return None query = query.values(**values) diff --git a/astrbot/core/mind_sim/AgentMindSubStage.py b/astrbot/core/mind_sim/AgentMindSubStage.py new file mode 100644 index 0000000000..ae7664e053 --- /dev/null +++ b/astrbot/core/mind_sim/AgentMindSubStage.py @@ -0,0 +1,762 @@ +"""高级人格 LLM 调用模块 - 替代 MindSimLLM 的角色路由 + run_agent 模式 + +作为 InternalMindSubStage/Brain/ReplyAction 的 LLM 调用层,支持: +- 按角色(deep/medium/fast/function/reply)注入不同模型 +- 组装提示词 + 调用 run_agent() +- 通过回调返回结果 or 直接发送到平台 +- 流式/非流式响应 + +- AgentMindSubStage:给高级人格 Stage 使用,支持更灵活的 run_agent 模式 +""" + +import asyncio +import base64 +from collections.abc import Callable +from dataclasses import dataclass +from typing import Any + +from astrbot.core import logger +from astrbot.core.agent.message import 
Message +from astrbot.core.agent.response import AgentStats +from astrbot.core.astr_main_agent import ( + MainAgentBuildConfig, + MainAgentBuildResult, + build_main_agent, +) +from astrbot.core.message.message_event_result import ( + MessageChain, + MessageEventResult, + ResultContentType, +) +from astrbot.core.persona_error_reply import ( + extract_persona_custom_error_message_from_event, +) +from astrbot.core.platform.astr_message_event import AstrMessageEvent +from astrbot.core.provider.entities import LLMResponse, ProviderRequest +from astrbot.core.star.star_handler import EventType +from astrbot.core.utils.metrics import Metric +from astrbot.core.utils.session_lock import session_lock_manager + +from ..astr_agent_run_util import AgentRunner, run_agent, run_live_agent +from ..pipeline.context_utils import call_event_hook + +# 安全防护:阻止连接到已知的恶意主机 +BLOCKED = {"dGZid2h2d3IuY2xvdWQuc2VhbG9zLmlv", "a291cmljaGF0"} +decoded_blocked = [base64.b64decode(b).decode("utf-8") for b in BLOCKED] + + +@dataclass +class ModelConfig: + """单个角色模型配置""" + + provider_id: str = "" + """Provider 实例 ID""" + model: str = "" + """模型名称""" + temperature: float = 0.7 + max_tokens: int = 4096 + + +@dataclass +class LLMCallResult: + """LLM 调用结果""" + + text: str = "" + """完整响应文本""" + streaming_delta: str = "" + """流式增量(单块文本)""" + is_streaming: bool = False + """是否流式""" + is_done: bool = False + """是否完成""" + usage: Any = None + """Token 用量""" + + +class AgentMindSubStage: + """高级人格 LLM 调用器 + + 支持: + - 按角色注册不同模型 + - 组装提示词 + 调用 run_agent + - 流式/非流式响应 + - 通过 step_callback 获取每步结果 + - 直接发送结果到平台 + + 架构与 internal.py 完全一致: + 1. 发送"正在输入"状态 + 2. 调用 OnWaitingLLMRequestEvent 钩子 + 3. 获取会话锁(确保同一会话请求顺序执行) + 4. 获取动作类型(支持 Live Mode) + 5. 根据模式选择 run_live_agent / run_agent 流式 / run_agent 普通 + 6. 保存历史记录 + 7. 上传指标 + + 使用方式: + 1. 创建实例,传入 event 和配置 + 2. 注册角色模型(可选) + 3. 
调用 call() 或 call_simple() 获取结果 + """ + + def __init__( + self, + event: AstrMessageEvent, + plugin_context: Any, + config: dict | None = None, + provider_wake_prefix: str = "", + ): + """ + Args: + event: 当前消息事件 + plugin_context: 插件上下文(用于获取 Provider) + config: 提供者设置(来自 provider_settings) + provider_wake_prefix: 提供者唤醒前缀 + """ + self.event = event + self.plugin_context = plugin_context + self.config = config or {} + self.provider_wake_prefix = provider_wake_prefix + + # 模型配置 + self._role_configs: dict[str, ModelConfig] = {} + self._provider_cache: dict[str, Any] = {} + + # 流式响应配置 + self.streaming_response: bool = ( + config.get("streaming_response", True) if config else True + ) + self.unsupported_streaming_strategy: str = ( + config.get("unsupported_streaming_strategy", "turn_off") + if config + else "turn_off" + ) + + # Agent 执行配置 这里默认是1 + self.max_step: int = 1 + self.show_tool_use: bool = ( + config.get("show_tool_use_status", True) if config else True + ) + self.show_tool_call_result: bool = ( + config.get("show_tool_call_result", False) if config else False + ) + self.show_reasoning: bool = ( + config.get("display_reasoning_text", False) if config else False + ) + + # Token 统计 + self._total_usage = None + + # 最后一次 call() 的完成文本 + self._last_completion_text: str = "" + + # 回调函数 + self._step_callback: Callable[[int, str, Any], None] | None = None + self._result_callback: Callable[[str], None] | None = None + + # 会话锁管理器 + self._conv_manager = None + + # Brain 事件队列引用(用于发送 PIPELINE_YIELD 事件) + self._mind_event_queue: asyncio.Queue | None = None + + def register_model(self, role: str, model_config: ModelConfig) -> str | None: + """注册角色对应的模型配置 + + Args: + role: 角色名 (deep/medium/fast/function/reply) + model_config: 模型配置 + + Returns: + 错误信息字符串,None 表示成功 + """ + if not model_config.provider_id or not model_config.model: + return None + + # 查缓存或获取 Provider + provider = self._provider_cache.get(model_config.provider_id) + if not provider: + provider = 
self.plugin_context.provider_manager.inst_map.get( + model_config.provider_id + ) + if not provider: + return f"提供商 '{model_config.provider_id}' 不存在或已被删除" + self._provider_cache[model_config.provider_id] = provider + + self._role_configs[role] = model_config + logger.debug( + f"[AgentMindSubStage] 注册模型 role={role}, " + f"provider={model_config.provider_id}, model={model_config.model}" + ) + return None + + def register_models_from_persona_config(self, persona_config: dict) -> list[str]: + """从高级人格配置注册所有角色模型 + + Args: + persona_config: 人格配置字典 + + Returns: + 注册失败的错误列表 + """ + errors = [] + llm_model_config = persona_config.get("llm_model_config", {}) + + role_map = { + "deep": llm_model_config.get("thinking_models", {}).get("deep", {}), + "medium": llm_model_config.get("thinking_models", {}).get("medium", {}), + "fast": llm_model_config.get("thinking_models", {}).get("fast", {}), + "function": llm_model_config.get("function_model", {}), + "reply": llm_model_config.get("reply_model", {}), + } + + for role, cfg_dict in role_map.items(): + if not cfg_dict: + continue + model_config = ModelConfig( + provider_id=cfg_dict.get("provider_id", ""), + model=cfg_dict.get("model", ""), + temperature=cfg_dict.get("temperature", 0.7), + max_tokens=cfg_dict.get("max_tokens", 4096), + ) + error = self.register_model(role, model_config) + if error: + errors.append(f"{role}: {error}") + logger.warning(f"[AgentMindSubStage] {role} 模型注册失败: {error}") + + return errors + + def _get_provider_and_model(self, role: str) -> tuple[Any, str | None, float]: + """获取角色对应的 Provider 实例、模型名和温度 + + Returns: + (Provider 实例, 模型名, 温度) + """ + default_provider = self.plugin_context.get_using_provider( + umo=self.event.unified_msg_origin + ) + config = self._role_configs.get(role) + if not config: + return default_provider, None, 0.7 + + provider = self._provider_cache.get(config.provider_id, default_provider) + return provider, config.model, config.temperature + + def set_step_callback(self, callback: 
Callable[[int, str, Any], None] | None): + """设置步骤回调(每步完成后调用)""" + self._step_callback = callback + + def set_result_callback(self, callback: Callable[[str], None] | None): + """设置结果回调(每次产出一个文本片段时调用)""" + self._result_callback = callback + + async def _build_agent_runner( + self, + system_prompt: str, + user_prompt: str, + contexts: list[dict] | None = None, + role: str = "deep", + ) -> MainAgentBuildResult: + """构建 Agent Runner + + 与 internal.py 一致,返回 reset_coro 由调用方决定何时执行。 + + Args: + system_prompt: 系统提示词 + user_prompt: 用户提示词 + contexts: 上下文消息列表(OpenAI 格式) + role: 模型角色(用于选择模型) + + Returns: + (agent_runner, provider_request, provider, reset_coro) + """ + provider, model_name, temperature = self._get_provider_and_model(role) + if not model_name: + model_name = provider.get_model() + + # 构建消息 + messages = [] + if system_prompt: + messages.append({"role": "system", "content": system_prompt}) + if contexts: + messages.extend(contexts) + messages.append({"role": "user", "content": user_prompt}) + + # 获取对话管理器 + if not self._conv_manager: + self._conv_manager = self.plugin_context.conversation_manager + + cid = await self._conv_manager.get_curr_conversation_id( + self.event.unified_msg_origin + ) + if not cid: + cid = await self._conv_manager.new_conversation( + self.event.unified_msg_origin, self.event.get_platform_id() + ) + conversation = await self._conv_manager.get_conversation( + self.event.unified_msg_origin, cid + ) + + req = ProviderRequest( + prompt=user_prompt, + session_id=str(self.event.session), + image_urls=[], + contexts=[], + system_prompt=system_prompt, + conversation=conversation, + func_tool=None, + tool_calls_result=None, + model=model_name, + ) + + # 构建 Agent + build_cfg = MainAgentBuildConfig( + tool_call_timeout=60, + tool_schema_mode="full", + streaming_response=self.streaming_response, + provider_settings=self.config, + max_quoted_fallback_images=20, + ) + + result = await build_main_agent( + event=self.event, + 
plugin_context=self.plugin_context, + req=req, + config=build_cfg, + apply_reset=False, + ) + + if result is None: + raise RuntimeError("Agent 构建失败") + # result.provider_request.contexts =[] + # result.provider_request.prompt = user_prompt + + return result + # # build_main_agent 会用数据库对话历史覆盖 req.contexts, + # req.contexts = messages + # + # agent_runner = result.agent_runner + # reset_coro = result.reset_coro + # + # return agent_runner, req, provider, reset_coro + + async def _pipeline_yield(self): + """桥接 pipeline yield 机制 + + AgentMindSubStage 不在 pipeline 里,无法直接 yield 给框架。 + 通过 Brain 的事件队列发送 PIPELINE_YIELD 事件, + InternalMindSubStage 收到后 yield 给 pipeline 框架(让 RespondStage 处理 event.result), + 完成后 set done_event 通知本方法返回。 + """ + from astrbot.core.mind_sim.messages import MindEvent + + if not self._mind_event_queue: + logger.warning( + "[AgentMindSubStage] 无 mind_event_queue,跳过 pipeline yield" + ) + return + + done_event = asyncio.Event() + await self._mind_event_queue.put(MindEvent.pipeline_yield(done_event)) + # 等待 InternalMindSubStage yield 完成 + await done_event.wait() + + async def call( + self, + prompt: str, + role: str = "deep", + system_prompt: str = "", + contexts: list[dict] | None = None, + streaming: bool | None = None, + max_step: int | None = None, + send_to_platform: bool = True, + ) -> str: + """调用 LLM 生成响应(与 internal.py process() 流程完全一致) + + 通过 PIPELINE_YIELD 事件桥接 pipeline 框架的 yield 机制, + 让 event.set_result() 的结果能被 RespondStage 处理并发送到平台。 + + Returns: + 最终响应文本 + """ + streaming_response = ( + streaming if streaming is not None else self.streaming_response + ) + use_max_step = max_step or self.max_step + + event = self.event + agent_runner: AgentRunner | None = None + + try: + # 1. 发送"正在输入"状态 + await event.send_typing() + # 2. 调用 OnWaitingLLMRequestEvent 钩子 + await call_event_hook(event, EventType.OnWaitingLLMRequestEvent) + + # 3. 
获取会话锁(确保同一会话请求顺序执行) + async with session_lock_manager.acquire_lock(event.unified_msg_origin): + logger.debug("[AgentMindSubStage] 已获取会话锁") + + try: + # 4. 构建 Agent Runner + build_result = await self._build_agent_runner( + system_prompt=system_prompt, + user_prompt=prompt, + contexts=contexts, + role=role, + ) + # 提取构建结果中的组件 + agent_runner = build_result.agent_runner + req = build_result.provider_request + provider = build_result.provider + reset_coro = build_result.reset_coro + + # 安全检查 + api_base = provider.provider_config.get("api_base", "") + for host in decoded_blocked: + if host in api_base: + logger.error( + "Provider API base %s is blocked due to security reasons.", + api_base, + ) + return "" + + # 检查是否应该将流式响应转换为普通响应 + stream_to_general = ( + self.unsupported_streaming_strategy == "turn_off" + and not event.platform_meta.support_streaming_message + ) + + # 5. 调用 OnLLMRequestEvent 钩子 + if await call_event_hook(event, EventType.OnLLMRequestEvent, req): + if reset_coro: + reset_coro.close() + return "" + + # 应用重置协程 + if reset_coro: + await reset_coro + + # 6. 
获取动作类型(支持 Live Mode) + action_type = event.get_extra("action_type") + + # 记录追踪信息 + event.trace.record( + "astr_agent_prepare", + system_prompt=req.system_prompt, + tools=req.func_tool.names() if req.func_tool else [], + stream=streaming_response, + chat_provider={ + "id": provider.provider_config.get("id", ""), + "model": provider.get_model(), + }, + ) + + # Live Mode(实时语音模式) + if action_type == "live": + logger.info( + "[AgentMindSubStage] 检测到 Live Mode,启用 TTS 处理" + ) + + tts_provider = self.plugin_context.get_using_tts_provider( + event.unified_msg_origin + ) + + if not tts_provider: + logger.warning( + "[Live Mode] TTS Provider 未配置,将使用普通流式模式" + ) + + # 使用 run_live_agent,总是使用流式响应 + event.set_result( + MessageEventResult() + .set_result_content_type(ResultContentType.STREAMING_RESULT) + .set_async_stream( + run_live_agent( + agent_runner, + tts_provider, + use_max_step, + self.show_tool_use, + self.show_tool_call_result, + show_reasoning=self.show_reasoning, + ), + ), + ) + await self._pipeline_yield() + + # 保存历史记录 + if agent_runner.done() and ( + not event.is_stopped() or agent_runner.was_aborted() + ): + await self._save_to_history( + req, + agent_runner.get_final_llm_resp(), + agent_runner.run_context.messages, + agent_runner.stats, + user_aborted=agent_runner.was_aborted(), + ) + + # 流式响应模式(非 Live Mode) + elif streaming_response and not stream_to_general: + event.set_result( + MessageEventResult() + .set_result_content_type(ResultContentType.STREAMING_RESULT) + .set_async_stream( + run_agent( + agent_runner, + use_max_step, + self.show_tool_use, + self.show_tool_call_result, + show_reasoning=self.show_reasoning, + ), + ), + ) + await self._pipeline_yield() + + # 流式完成后设置最终结果 + if agent_runner.done(): + if final_llm_resp := agent_runner.get_final_llm_resp(): + if final_llm_resp.completion_text: + chain = ( + MessageChain() + .message(final_llm_resp.completion_text) + .chain + ) + elif final_llm_resp.result_chain: + chain = final_llm_resp.result_chain.chain + 
else: + chain = MessageChain().chain + + event.set_result( + MessageEventResult( + chain=chain, + result_content_type=ResultContentType.LLM_RESULT, + ), + ) + + # 保存历史记录 + if not event.is_stopped() or agent_runner.was_aborted(): + await self._save_to_history( + req, + agent_runner.get_final_llm_resp(), + agent_runner.run_context.messages, + agent_runner.stats, + user_aborted=agent_runner.was_aborted(), + ) + + # 普通响应模式(非流式或流式转普通) + else: + async for _ in run_agent( + agent_runner, + use_max_step, + self.show_tool_use, + self.show_tool_call_result, + stream_to_general, + self.show_reasoning, + ): + await self._pipeline_yield() + + # 获取最终响应 + final_resp = agent_runner.get_final_llm_resp() + + # 保存完成文本供调用方读取 + self._last_completion_text = ( + final_resp.completion_text if final_resp else "" + ) or "" + + # 记录代理完成信息 + event.trace.record( + "astr_agent_complete", + stats=agent_runner.stats.to_dict(), + resp=final_resp.completion_text if final_resp else None, + ) + + # 普通模式保存历史记录 + if ( + not (streaming_response and not stream_to_general) + and action_type != "live" + ): + if not event.is_stopped() or agent_runner.was_aborted(): + await self._save_to_history( + req, + final_resp, + agent_runner.run_context.messages, + agent_runner.stats, + user_aborted=agent_runner.was_aborted(), + ) + + # 上传指标 + asyncio.create_task( + Metric.upload( + llm_tick=1, + model_name=agent_runner.provider.get_model(), + provider_type=agent_runner.provider.meta().type, + ), + ) + + except Exception: + raise + + except Exception as e: + logger.error(f"[AgentMindSubStage] LLM 调用失败: {e}") + custom_error_message = extract_persona_custom_error_message_from_event( + event + ) + error_text = custom_error_message or f"LLM 调用失败: {e}" + await event.send(MessageChain().message(error_text)) + return "" + + return self._last_completion_text + + async def _save_to_history( + self, + req: ProviderRequest, + llm_response: LLMResponse | None, + all_messages: list[Message], + runner_stats: AgentStats | None, + 
user_aborted: bool = False, + ) -> None: + """保存对话历史到数据库 + + 与 internal.py 的 _save_to_history 逻辑完全一致。 + """ + return # 在这里暂时不保存 + # if not req or not req.conversation: + # return + # + # if not llm_response and not user_aborted: + # return + # + # if llm_response and llm_response.role != "assistant": + # if not user_aborted: + # return + # llm_response = LLMResponse( + # role="assistant", + # completion_text=llm_response.completion_text or "", + # ) + # elif llm_response is None: + # llm_response = LLMResponse(role="assistant", completion_text="") + # + # if ( + # not llm_response.completion_text + # and not req.tool_calls_result + # and not user_aborted + # ): + # logger.debug("[AgentMindSubStage] LLM 响应为空,不保存记录。") + # return + # + # # 过滤和准备要保存的消息 + # message_to_save = [] + # skipped_initial_system = False + # for message in all_messages: + # if message.role == "system" and not skipped_initial_system: + # skipped_initial_system = True + # continue + # if message.role in ["assistant", "user"] and message._no_save: + # continue + # message_to_save.append(message.model_dump()) + # + # token_usage = None + # if runner_stats and llm_response and llm_response.usage: + # token_usage = llm_response.usage.total + # + # if not self._conv_manager: + # self._conv_manager = self.plugin_context.conversation_manager + # + # await self._conv_manager.update_conversation( + # self.event.unified_msg_origin, + # req.conversation.cid, + # history=message_to_save, + # token_usage=token_usage, + # ) + + async def call_simple( + self, + prompt: str, + role: str = "deep", + system_prompt: str = "", + contexts: list[dict] | None = None, + ) -> str: + """简单调用 LLM,直接返回文本(不使用 run_agent) + + 用于不需要工具调用能力的场景,如 Brain 的思考过程。 + + Args: + prompt: 用户提示词 + role: 模型角色 + system_prompt: 系统提示词 + contexts: 上下文消息列表 + + Returns: + LLM 响应文本 + """ + provider, model_name, temperature = self._get_provider_and_model(role) + + messages = [] + if system_prompt: + messages.append({"role": "system", "content": 
system_prompt}) + if contexts: + messages.extend(contexts) + messages.append({"role": "user", "content": prompt}) + + try: + response: LLMResponse = await provider.text_chat( + prompt=prompt, + contexts=messages, + model=model_name, + temperature=temperature, + ) + + if response.usage: + self._total_usage = response.usage + + if response.role == "err": + raise RuntimeError( + f"LLM 返回错误: {response.completion_text or '未知错误'}" + ) + + return response.completion_text or response.reasoning_content or "" + + except Exception as e: + logger.error(f"[AgentMindSubStage] call_simple 失败 (role={role}): {e}") + raise + + @property + def token_usage(self) -> Any: + """累计 token 用量""" + return self._total_usage + + @classmethod + def create_for_brain( + cls, + event: AstrMessageEvent, + plugin_context: Any, + persona_config: dict, + ) -> "AgentMindSubStage": + """工厂方法:从高级人格配置创建 AgentMindSubStage + + Args: + event: 消息事件 + plugin_context: 插件上下文 + persona_config: 人格配置 + + Returns: + AgentMindSubStage 实例(已注册所有角色模型) + """ + # 获取 provider_settings + cfg = plugin_context.get_config(event.unified_msg_origin) + provider_settings = cfg.get("provider_settings", {}) + + # 获取 provider_wake_prefix + prov_wake = provider_settings.get("wake_prefix", "") + + instance = cls( + event=event, + plugin_context=plugin_context, + config=provider_settings, + provider_wake_prefix=prov_wake, + ) + + # 注册人格配置的模型 + instance.register_models_from_persona_config(persona_config) + + return instance diff --git a/astrbot/core/mind_sim/__init__.py b/astrbot/core/mind_sim/__init__.py new file mode 100644 index 0000000000..cb7f920ee7 --- /dev/null +++ b/astrbot/core/mind_sim/__init__.py @@ -0,0 +1,72 @@ +"""mind_sim - 高级人格的持续思考引擎 + +mind_sim 是高级人格的核心模块,负责: +- 持续循环思考 +- 管理多个并发动作 +- 协调动作之间的通信 +- 与外部(用户、平台)交互 + +核心概念: +- MindContext: 会话上下文状态 +- mind_sim: 主引擎,负责思考循环 +- Action: 独立运行的动作协程 +- Decision: LLM 产生的决策 + +使用示例: +```python +from astrbot.core.mind_sim import MindContext, MindSimLLM +from 
astrbot.core.mind_sim.private.actions import get_available_actions, create_action +import time + +# 创建上下文 +ctx = MindContext( + session_id="test", + unified_msg_origin="webchat:private:test", + is_private=True, + persona_id="advanced_1", + system_prompt="你是一个有帮助的助手", +) + +# 获取可用动作 +actions = get_available_actions(is_private=True) + +# 创建动作实例 +reply_action = create_action("reply", ctx) +``` +""" + +from .action import Action, ActionExecutor, PreExecuteResult, RunningAction, TempPrompt +from .context import MindContext +from .messages import ( + ActionOutput, + ActionSendMsg, + ActionState, + ActionStateUpdate, + ActionStopMsg, + Decision, + IncomingUserMessage, + MindEvent, + MindEventType, + MindMessage, +) + +__all__ = [ + # 核心 + "MindContext", + "Action", + "ActionExecutor", + "TempPrompt", + "PreExecuteResult", + "RunningAction", + # 消息类型 + "MindMessage", + "ActionState", + "ActionSendMsg", + "ActionStopMsg", + "ActionStateUpdate", + "ActionOutput", + "IncomingUserMessage", + "Decision", + "MindEvent", + "MindEventType", +] diff --git a/astrbot/core/mind_sim/action.py b/astrbot/core/mind_sim/action.py new file mode 100644 index 0000000000..b939e2a5d2 --- /dev/null +++ b/astrbot/core/mind_sim/action.py @@ -0,0 +1,656 @@ +"""Action 基类定义 + ActionExecutor 执行器 + +动作是独立运行的协程,通过消息与主思考通信。 +ActionExecutor 统一管理所有运行中的动作实例,支持同一动作多实例并发。 +""" + +import asyncio +import time +from abc import ABC, abstractmethod +from collections.abc import AsyncGenerator, Callable +from dataclasses import dataclass, field +from typing import Any + +from astrbot.core import logger + +from .messages import ( + ActionOutput, + ActionState, + ActionStateUpdate, + MindMessage, +) + +# ========== 预执行相关数据类 ========== + + +@dataclass +class TempPrompt: + """临时提示词 - 在指定轮数和时间内附加到主思考提示词 + + 由动作添加(before_execute 或运行中),每轮思考消耗一次, + remaining_rounds 降到 0 且超过 min_duration 后自动移除。 + + Attributes: + content: 提示词内容 + remaining_rounds: 剩余有效轮数 + min_duration: 最小保留时间(秒),默认 30 秒 + created_at: 创建时间戳 + source: 来源标识(如 
"reply#1"、"wait#2") + """ + + content: str + remaining_rounds: int + min_duration: float = 30.0 # 最小保留时间(秒) + created_at: float = field(default_factory=time.time) + source: str = "" # 来源动作实例 ID + + +@dataclass +class PreExecuteResult: + """动作预执行结果 - 在动作 run() 之前返回,影响主思考 + + Attributes: + temp_prompts: 临时提示词列表(N轮后自动消失) + block: 是否阻塞主思考循环(等待中断) + block_timeout: 阻塞超时(秒) + block_reason: 阻塞原因描述(给日志用) + """ + + temp_prompts: list[TempPrompt] = field(default_factory=list) + block: bool = False + block_timeout: float = 60.0 + block_reason: str = "" + + +# ========== Action 基类 ========== + + +class Action(ABC): + """动作基类 + + 动作是独立运行的协程,具有以下特性: + 1. 独立运行:作为 asyncio.Task 运行,不阻塞主思考 + 2. 状态可见:state 随时可被主思考读取 + 3. 双向通信:可以接收主思考消息,也可以发送产出 + 4. 提示词贡献:可以贡献静态和动态提示词 + 5. 预执行钩子:run() 之前可以影响主思考(临时提示词、阻塞等) + + 子类需要实现: + - run(): 核心逻辑 + - before_execute(): 可选,预执行钩子 + """ + + # 类属性:动作元信息 + name: str = "base" # 动作名称(类型标识,非实例标识) + description: str = "" # 动作描述(给主思考看的) + usage_guide: str = "" # 使用条件/指南 + fixed_prompt: str = "" # 固定提示词贡献(静态) + priority: int = 0 # 优先级(用于排序显示) + + def __init__(self): + self.ctx: Any = None # MindContext,由 mind_sim 注入 + self.llm: Any = None # MindSimLLM,由 mind_sim 注入 + self.instance_id: str = "" # 运行时分配的实例 ID(如 reply#1) + self.inbox: asyncio.Queue[MindMessage] = asyncio.Queue() + self._task: asyncio.Task | None = None + self._state: ActionState = ActionState(action_name=self.name) + self._send_callback: Callable | None = None + self._temp_prompt_callback: Callable | None = None + self._executor: Any = None # ActionExecutor 引用,由 executor 注入 + self._params: dict = {} # 保存启动参数,用于 on_complete + self._cancelled: bool = False # 是否被 cancel(用于阻止被停止后继续发事件) + + def bind_context(self, ctx: Any) -> "Action": + """绑定上下文(由 ActionExecutor 调用)""" + self.ctx = ctx + return self + + def bind_llm(self, llm: Any) -> "Action": + """绑定 LLM(由 ActionExecutor 调用)""" + self.llm = llm + return self + + @property + def state(self) -> ActionState: + """获取当前状态(主思考会读取)""" + return self._state + 
    def update_state(
        self,
        status: str | None = None,
        progress: str | None = None,
        data: dict | None = None,
        prompt_contribution: str | None = ...,  # ... = "not provided"; None = clear
        can_receive: bool | None = None,
        error: str | None = None,
    ):
        """Update this action's state; only explicitly provided fields change.

        ``prompt_contribution`` uses the Ellipsis sentinel so that callers can
        pass ``None`` to explicitly clear the contribution.
        """
        if status is not None:
            self._state.status = status
        if progress is not None:
            self._state.progress = progress
        if data is not None:
            self._state.data.update(data)
        if prompt_contribution is not ...:
            self._state.prompt_contribution = prompt_contribution
        if can_receive is not None:
            self._state.can_receive = can_receive
        if error is not None:
            self._state.error = error
        self._state.updated_at = time.time()

    def set_send_callback(self, callback: Callable):
        """Set the output callback (called by ActionExecutor)."""
        self._send_callback = callback

    def set_temp_prompt_callback(self, callback: Callable):
        """Set the temporary-prompt callback (called by ActionExecutor)."""
        self._temp_prompt_callback = callback

    def add_temp_prompt(
        self, content: str, rounds: int = 5, min_duration: float = 30.0
    ) -> None:
        """Add a temporary prompt while the action is running.

        Args:
            content: Prompt text.
            rounds: Number of rounds the prompt stays active (default 5).
            min_duration: Minimum retention time in seconds (default 30).
        """
        # NOTE(review): the hasattr() guard is always true since __init__
        # assigns _temp_prompt_callback; the truthiness check alone suffices.
        if hasattr(self, "_temp_prompt_callback") and self._temp_prompt_callback:
            self._temp_prompt_callback(
                TempPrompt(
                    content=content,
                    remaining_rounds=rounds,
                    min_duration=min_duration,
                    source=self.instance_id or self.name,
                )
            )

    async def send_to_main(self, output: ActionOutput):
        """Send an output to the main thinking loop.

        A state-update message is pushed first so the main loop observes the
        freshest state before processing the output itself.
        """
        if self._send_callback:
            state_update = ActionStateUpdate(
                action_name=self.instance_id or self.name,
                state=self._state,
            )
            await self._send_callback(state_update)
            await self._send_callback(output)

    async def receive(self, msg: MindMessage):
        """Receive a message sent by the main thinking loop."""
        await self.inbox.put(msg)

    async def check_message(self, timeout: float = 0) -> MindMessage | None:
        """Check for a message from the main thinking loop.

        Args:
            timeout: Seconds to wait; 0 means a non-blocking check.

        Returns:
            The next MindMessage, or None if nothing arrived in time.
        """
        try:
            if timeout > 0:
                return await asyncio.wait_for(self.inbox.get(), timeout=timeout)
            elif self.inbox.empty():
                return None
            else:
                return self.inbox.get_nowait()
        except asyncio.QueueEmpty:
            return None
        except asyncio.TimeoutError:
            return None

    async def before_execute(self, params: dict) -> PreExecuteResult | None:
        """Pre-execute hook - called by the main loop before run().

        Can be used to influence the main thinking loop, e.g.:
        - add a temporary prompt for the next N rounds
          (such as "you replied before round X")
        - block the main loop (waiting for a user/action message to interrupt)

        Args:
            params: Start parameters (JSON params from the START decision).

        Returns:
            PreExecuteResult, or None for no effect.
        """
        return None

    async def on_complete(self, params: dict) -> None:
        """Completion hook - called after run() finishes normally (not stopped).

        Can add temporary prompts, e.g.:
        - "replied xxx"
        - "waited xxx seconds"

        Args:
            params: Start parameters (JSON params from the START decision).
        """
        pass

    async def on_stop(self) -> None:
        """Stop hook - called when the action is forcefully stopped.

        Subclasses may override this to clean up resources.
        """
        pass

    def get_completion_output(self) -> ActionOutput | None:
        """Return the event to emit once the action completes.

        Subclasses can override to customize completion behavior:
        - return an ActionOutput: that event is sent
          (type="completed" triggers re-thinking)
        - return None: no event is sent and no re-thinking is triggered

        Default: emit a type="completed" event, triggering the main loop to
        re-think.

        Returns:
            ActionOutput or None.
        """
        return ActionOutput(
            action_name=self.instance_id or self.name,
            type="completed",
            content="",
        )

    @abstractmethod
    async def run(self, params: dict) -> AsyncGenerator[ActionOutput, None]:
        """Run the action (implemented by subclasses).

        Args:
            params: Start parameters (from the main loop's START decision).

        Yields:
            ActionOutput: outputs to forward to the main loop.

        Notes:
            - Periodically call check_message() for messages from the main loop.
            - On ActionStopMsg, clean up and exit.
            - On ActionSendMsg, adjust behavior accordingly.
        """
        ...
    async def start(self, params: dict) -> asyncio.Task:
        """Start the action (called by ActionExecutor)."""
        self._state = ActionState(
            action_name=self.instance_id or self.name,
            status="running",
            created_at=time.time(),
            updated_at=time.time(),
        )
        self._task = asyncio.create_task(self._run_wrapper(params))
        return self._task

    async def _run_wrapper(self, params: dict):
        """Wrap run(): forward outputs, track state, and handle exceptions."""
        self._params = params  # kept so on_complete can see the start params
        try:
            async for output in self.run(params):
                # Once cancelled, drop any further output and exit immediately.
                if self._cancelled:
                    break
                await self.send_to_main(output)
            self._state.status = "completed"
            self._state.prompt_contribution = None

            # Completion hook (may add temporary prompts, etc.).
            await self.on_complete(params)

            # Subclass-defined completion event (may be None).
            completion_output = self.get_completion_output()
            # Do not emit a completion event if we were cancelled.
            if completion_output and not self._cancelled:
                await self.send_to_main(completion_output)
        except asyncio.CancelledError:
            # NOTE(review): CancelledError derives from BaseException (not
            # Exception) since Python 3.8. It is swallowed here deliberately —
            # not re-raised — so this wrapper can record the stopped state and
            # let the finally block run its cleanup.
            self._cancelled = True
            self._state.status = "stopped"
            self._state.prompt_contribution = None
        except Exception as e:
            self._state.status = "error"
            self._state.error = str(e)
            self._state.prompt_contribution = None
            await self.send_to_main(
                ActionOutput(
                    action_name=self.instance_id or self.name,
                    type="error",
                    content=f"动作执行出错: {e}",
                    metadata={"error": str(e)},
                )
            )
        finally:
            # Cleanup: on_stop() runs only when the action was cancelled
            # (stop() or CancelledError). Normal completion is handled by
            # on_complete() above.
            if self._cancelled:
                # noinspection PyBroadException
                try:
                    await self.on_stop()
                except Exception as e:
                    logger.debug(f"[Action] on_stop 异常: {e}")

    async def stop(self, reason: str = ""):
        """Forcefully stop the action (killed from outside).

        Three things happen together:
        1. set _cancelled so no further output is emitted
        2. cancel the asyncio.Task so the action exits its await point
        3. release held resources (AgentRunner, etc.)
        """
        self._cancelled = True
        self._state.status = "stopped"
        self._state.progress = f"已停止: {reason}" if reason else "已停止"
        if self._task and not self._task.done():
            self._task.cancel()
        # Do not await the task here, to avoid blocking on a pending
        # check_message timeout.

    def is_running(self) -> bool:
        """Whether the action is currently running."""
        return self._state.status == "running"

    def is_done(self) -> bool:
        """Whether the action has finished (completed, stopped, or error)."""
        return self._state.status in ("completed", "stopped", "error")

    def get_info(self) -> dict:
        """Action metadata for the main thinking loop."""
        return {
            "name": self.name,
            "description": self.description,
            "fixed_prompt": self.fixed_prompt,
            "priority": self.priority,
            "status": self._state.status,
        }


# ========== Running action instance ==========


@dataclass
class RunningAction:
    """A currently running action instance."""

    instance_id: str  # unique instance id (e.g. reply#1, reply#2)
    action_name: str  # action class name (e.g. reply)
    action: Action  # the action object
    task: asyncio.Task  # its asyncio task
    started_at: float = field(default_factory=time.time)


# ========== ActionExecutor ==========


class ActionExecutor:
    """Action executor - central manager for running action instances.

    Responsibilities:
    1. Register action classes (Action subclasses) and create instances
       on demand, factory-style.
    2. Start action instances (the same action may be started multiple
       times, distinguished by instance_id).
    3. Send messages to / stop running instances.
    4. Automatically clean up finished instances.
    5. Manage temporary prompts (added via an action's before_execute).
    6. Provide a status summary of running actions (for prompts).

    instance_id format: <action name>#<sequence>, e.g. reply#1, reply#2,
    wait#1.
    """
提供运行中动作的状态摘要(给 prompts 用) + + instance_id 格式:<动作名>#<序号>,如 reply#1, reply#2, wait#1 + """ + + def __init__(self, ctx: Any, send_callback: Callable, llm: Any = None): + """初始化执行器 + + Args: + ctx: MindContext 会话上下文(绑定到每个新建的动作实例) + send_callback: 动作产出回调(连接到 Brain 的事件队列) + llm: MindSimLLM 实例,供动作调用 LLM + """ + self._action_classes: dict[str, type[Action]] = {} + self._running: dict[str, RunningAction] = {} + self._counter: dict[str, int] = {} # 动作名 → 累计计数 + self._ctx = ctx + self._send_callback = send_callback + self._llm = llm + self._temp_prompts: list[TempPrompt] = [] + + def _add_temp_prompt(self, temp_prompt: TempPrompt) -> None: + """添加临时提示词(由 Action.add_temp_prompt 回调)""" + self._temp_prompts.append(temp_prompt) + logger.debug( + f"[ActionExecutor] 添加临时提示词 (来源: {temp_prompt.source}, " + f"剩余轮数: {temp_prompt.remaining_rounds}): {temp_prompt.content[:50]}..." + ) + + def register(self, action_cls: type[Action]): + """注册动作类""" + self._action_classes[action_cls.name] = action_cls + logger.debug(f"[ActionExecutor] 注册动作类: {action_cls.name}") + + def get_action_class_names(self) -> list[str]: + """获取所有已注册的动作类名""" + return list(self._action_classes.keys()) + + def get_action_infos(self) -> list[dict]: + """获取所有动作类的元信息(给 prompts 用,展示可用动作列表) + + Returns: + 按 priority 降序排列的动作元信息列表 + """ + infos = [] + for name, cls in self._action_classes.items(): + # 统计该动作当前运行中的实例数 + running_count = sum( + 1 + for r in self._running.values() + if r.action_name == name and r.action.is_running() + ) + infos.append( + { + "name": cls.name, + "description": cls.description or "", + "usage_guide": cls.usage_guide or "", + "fixed_prompt": cls.fixed_prompt or "", + "priority": cls.priority, + "running_count": running_count, + } + ) + return sorted(infos, key=lambda x: x["priority"], reverse=True) + + async def start( + self, action_name: str, params: dict + ) -> tuple[str, PreExecuteResult | None]: + """启动动作实例 + + Args: + action_name: 动作类名(如 "reply") + params: 启动参数 + + Returns: + (instance_id, 
pre_execute_result) + + Raises: + ValueError: 未知动作类名 + """ + cls = self._action_classes.get(action_name) + if not cls: + raise ValueError(f"未知动作: {action_name}") + + # 创建新实例 + instance = cls() + instance.bind_context(self._ctx) + instance.bind_llm(self._llm) + instance.set_send_callback(self._send_callback) + instance.set_temp_prompt_callback(self._add_temp_prompt) + instance._executor = self + + # 生成唯一 instance_id + count = self._counter.get(action_name, 0) + 1 + self._counter[action_name] = count + instance_id = f"{action_name}#{count}" + instance.instance_id = instance_id + + # 调用预执行钩子 + pre_result = await instance.before_execute(params) + if pre_result and pre_result.temp_prompts: + self._temp_prompts.extend(pre_result.temp_prompts) + + # 启动动作 + task = await instance.start(params) + self._running[instance_id] = RunningAction( + instance_id=instance_id, + action_name=action_name, + action=instance, + task=task, + ) + + logger.info(f"[ActionExecutor] 启动动作实例: {instance_id}") + return instance_id, pre_result + + async def send_to(self, instance_id: str, msg: MindMessage): + """向指定实例发送消息""" + running = self._running.get(instance_id) + if running and running.action.is_running(): + await running.action.receive(msg) + else: + logger.warning( + f"[ActionExecutor] 无法发送消息到 {instance_id}: 实例不存在或已停止" + ) + + async def stop_instance(self, instance_id: str, reason: str = ""): + """停止指定实例""" + running = self._running.get(instance_id) + if running: + await running.action.stop(reason) + logger.info(f"[ActionExecutor] 停止实例: {instance_id}") + else: + logger.warning(f"[ActionExecutor] 无法停止 {instance_id}: 实例不存在") + + async def stop_by_name(self, action_name: str, reason: str = ""): + """停止指定动作名的所有实例""" + for iid, running in list(self._running.items()): + if running.action_name == action_name and running.action.is_running(): + await running.action.stop(reason) + logger.info(f"[ActionExecutor] 按名称停止实例: {iid}") + + async def cleanup_completed(self) -> list[str]: + """清理已完成的动作实例 + + 
Returns: + 被清理的 instance_id 列表 + """ + to_remove = [iid for iid, r in self._running.items() if r.action.is_done()] + for iid in to_remove: + del self._running[iid] + if to_remove: + logger.debug(f"[ActionExecutor] 清理已完成实例: {to_remove}") + return to_remove + + def get_running_states(self) -> list[dict]: + """获取所有运行中动作的状态(给 prompts 用) + + Returns: + 运行中实例的状态列表,每项包含: + - instance_id: 实例 ID + - action_name: 动作类名 + - state: ActionState 对象 + """ + states = [] + for iid, running in self._running.items(): + if running.action.is_running(): + states.append( + { + "instance_id": iid, + "action_name": running.action_name, + "state": running.action.state, + } + ) + return states + + def tick_temp_prompts(self, consume_rounds: bool = True) -> list[str]: + """消耗一轮临时提示词 + + 返回本轮生效的临时提示词内容列表(带时间信息), + 同时将剩余轮数减 1,清除已过期的(轮数为0且超过最小保留时间)。 + + 格式:"[距离现在Xs] 原始内容" + + Args: + consume_rounds: 是否消耗轮数,默认 True + + Returns: + 本轮生效的临时提示词内容(带时间戳) + """ + import time + + now = time.time() + active = [] + remaining = [] + for tp in self._temp_prompts: + elapsed = now - tp.created_at + + # 检查是否应该保留:轮数 > 0 或者未达到最小保留时间 + should_keep = tp.remaining_rounds > 0 or elapsed < tp.min_duration + + if should_keep: + # 格式化时间显示 + elapsed_int = int(elapsed) + if elapsed_int < 60: + time_str = f"{elapsed_int}秒" + elif elapsed_int < 3600: + time_str = f"{elapsed_int // 60}分{elapsed_int % 60}秒" + else: + time_str = ( + f"{elapsed_int // 3600}小时{(elapsed_int % 3600) // 60}分" + ) + + # 添加时间信息 + formatted = f"[{tp.source} 已完成,距离现在 {time_str}] {tp.content}" + active.append(formatted) + + if consume_rounds: + tp.remaining_rounds -= 1 + # 只有轮数 > 0 或未达到最小时间才保留 + if tp.remaining_rounds > 0 or elapsed < tp.min_duration: + remaining.append(tp) + else: + remaining.append(tp) + + if consume_rounds: + self._temp_prompts = remaining + return active + + def has_running(self) -> bool: + """是否有动作正在运行""" + return any(r.action.is_running() for r in self._running.values()) + + async def stop_all(self, reason: str = ""): + 
"""停止所有动作""" + for running in self._running.values(): + if running.action.is_running(): + await running.action.stop(reason) + self._running.clear() + logger.info("[ActionExecutor] 已停止所有动作") + + def resolve_instance_id(self, target: str) -> str | None: + """解析目标标识为 instance_id + + 支持两种输入: + - 直接 instance_id: "reply#1" → "reply#1" + - 动作名(取最新的运行中实例): "reply" → "reply#2" + + Returns: + instance_id 或 None + """ + # 直接匹配 + if target in self._running: + return target + + # 按动作名匹配(取最新的运行中实例) + candidates = [ + (iid, r) + for iid, r in self._running.items() + if r.action_name == target and r.action.is_running() + ] + if candidates: + candidates.sort(key=lambda x: x[1].started_at, reverse=True) + return candidates[0][0] + + return None diff --git a/astrbot/core/mind_sim/context.py b/astrbot/core/mind_sim/context.py new file mode 100644 index 0000000000..8658958211 --- /dev/null +++ b/astrbot/core/mind_sim/context.py @@ -0,0 +1,75 @@ +"""mind_sim 上下文状态""" + +from dataclasses import dataclass, field +from typing import TYPE_CHECKING, Any + +from .messages import ActionState + +if TYPE_CHECKING: + pass + + +@dataclass +class MindContext: + """mind_sim 会话上下文 + + 包含会话的所有状态信息,主思考和所有动作共享此上下文。 + """ + + # 会话标识 + session_id: str + unified_msg_origin: str + is_private: bool + + # 人格配置 + persona_id: str + system_prompt: str = "" + personality_config: dict = field(default_factory=dict) + chat_config: dict = field(default_factory=dict) + robot_config: dict = field(default_factory=dict) + + # 动作状态(主思考从这里读取) + action_states: dict[str, ActionState] = field(default_factory=dict) + + # 用户信息 + user_id: str = "" + user_name: str = "" + + # 自由存储区(动作可以存取) + memory: dict = field(default_factory=dict) + + # 数据库对话管理器和对话ID(用于读取历史记录) + conv_manager: Any = field(default=None) + conversation_id: str = "" + + # 运行时上下文(用于动作调用外部服务) + event: Any = field(default=None) # AstrMessageEvent + plugin_context: Any = field(default=None) # PluginContext + + def get_action_state(self, action_name: str) -> 
ActionState | None: + """获取指定动作的状态""" + return self.action_states.get(action_name) + + def get_running_actions(self) -> list[str]: + """获取所有正在运行的动作名称""" + return [ + name + for name, state in self.action_states.items() + if state.status == "running" + ] + + def has_running_action(self, action_name: str) -> bool: + """检查指定动作是否正在运行""" + state = self.action_states.get(action_name) + return state is not None and state.status == "running" + + def to_prompt_context(self) -> dict: + """转换为提示词上下文(供主思考使用)""" + return { + "session_id": self.session_id, + "is_private": self.is_private, + "persona_id": self.persona_id, + "user_name": self.user_name, + "running_actions": self.get_running_actions(), + "memory_keys": list(self.memory.keys()), + } diff --git a/astrbot/core/mind_sim/dispatcher.py b/astrbot/core/mind_sim/dispatcher.py new file mode 100644 index 0000000000..737102815d --- /dev/null +++ b/astrbot/core/mind_sim/dispatcher.py @@ -0,0 +1,98 @@ +"""MindSim 简化的 Brain 工厂模块 + +只负责根据会话类型创建/管理 Brain 实例,不再是全局单例。 +由 internal_mind 持有和管理。 + +调度逻辑: +- 私聊:mind_sim.private.brain.PrivateBrain +- 群聊:降级为私聊处理(暂未实现群聊) +""" + +import asyncio +from collections.abc import AsyncGenerator + +from astrbot.core import logger +from astrbot.core.mind_sim.context import MindContext +from astrbot.core.mind_sim.messages import MindEvent + + +class PrivateBrainFactory: + """简化的 Brain 工厂 + + 不再是全局单例,由 internal_mind 持有。 + 职责: + 1. 根据 session_id 管理 Brain 实例映射 + 2. 创建 Brain 时根据会话类型选择处理模块(私聊/群聊) + 3. 
提供 dispatch 方法启动事件流 + """ + + def __init__(self): + self._instances: dict = {} + self._lock = asyncio.Lock() + + async def dispatch( + self, + ctx: MindContext, + message: str, + sender_id: str, + sender_name: str, + persona: dict | None = None, + ) -> AsyncGenerator[MindEvent, None]: + """分发消息到对应的 Brain + + Args: + ctx: MindContext 会话上下文 + message: 用户消息 + sender_id: 发送者 ID + sender_name: 发送者名称 + persona: 高级人格配置 + + Yields: + MindEvent: 事件流 + """ + session_id = ctx.session_id + is_new_instance = False + + async with self._lock: + if session_id not in self._instances: + # 根据会话类型选择处理模块 + if ctx.is_private: + from .private.brain import PrivateBrain + + handler = PrivateBrain(ctx, persona=persona) + handler.init_llm(ctx.event, ctx.plugin_context, persona) + logger.info(f"[BrainFactory] 创建私聊 Brain 实例: {session_id}") + else: + # 群聊暂未实现,降级为私聊处理 + logger.warning( + f"[BrainFactory] 群聊处理暂未实现,降级为私聊处理: {session_id}" + ) + from .private.brain import PrivateBrain + + handler = PrivateBrain(ctx, persona=persona) + handler.init_llm(ctx.event, ctx.plugin_context, persona) + handler._is_fallback = True + + self._instances[session_id] = handler + is_new_instance = True + else: + handler = self._instances[session_id] + + # 发送消息到 Brain + await handler.handle_message(message, sender_id, sender_name) + + # 只有首次创建实例或没有活跃的事件流时才监听 + if is_new_instance or not handler._stream_active: + async for event in handler.get_event_stream(): + yield event + else: + logger.debug(f"[BrainFactory] 实例 {session_id} 已有活跃事件流,仅投递消息") + + async def stop_all(self): + """停止所有 Brain 实例""" + async with self._lock: + for session_id, instance in self._instances.items(): + await instance.stop() + logger.info(f"[BrainFactory] 停止实例: {session_id}") + self._instances.clear() + logger.info("[BrainFactory] 已停止所有实例") diff --git a/astrbot/core/mind_sim/group/__init__.py b/astrbot/core/mind_sim/group/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/astrbot/core/mind_sim/memory/__init__.py 
# --- astrbot/core/mind_sim/memory/__init__.py ---
"""MindSim memory system."""

from .manager import MemoryManager

__all__ = ["MemoryManager"]


# --- astrbot/core/mind_sim/memory/chat_summarizer.py ---
"""MindSim chat-memory summarizer.

Ports the core logic of MaiBot's ChatHistorySummarizer:
- accumulate messages -> topic recognition -> topic summarization -> persist
- the topic cache is persisted to a database JSON field so it survives restarts
"""

import difflib
import json
import time
from dataclasses import dataclass, field

from astrbot.core import logger
from astrbot.core.db import BaseDatabase
from astrbot.core.mind_sim.AgentMindSubStage import AgentMindSubStage

from .models import MindSimChatMemory
from .prompts import TOPIC_ANALYSIS_PROMPT, TOPIC_SUMMARY_PROMPT
from .utils import extract_json_from_response


@dataclass
class MessageItem:
    """A single buffered message."""

    user_id: str
    nickname: str
    content: str
    role: str  # "user" | "assistant"
    timestamp: float = field(default_factory=time.time)

    def to_readable(self, idx: int) -> str:
        """Render as numbered readable text (for LLM topic analysis)."""
        return f"{idx}. [{self.nickname}]: {self.content}"

    def to_text(self) -> str:
        """Render as un-numbered text."""
        return f"[{self.nickname}]: {self.content}"


@dataclass
class TopicCacheItem:
    """A cached topic entry.

    Attributes:
        topic: Topic title (one sentence covering time, people, event, theme).
        messages: Message-text segments associated with this topic.
        participants: Set of speaker nicknames involved.
        no_update_checks: Consecutive checks with no new content.
    """

    topic: str
    messages: list[str] = field(default_factory=list)
    participants: set[str] = field(default_factory=set)
    no_update_checks: int = 0
class ChatSummarizer:
    """Chat-memory summarizer.

    Core flow (mirrors MaiBot):
    1. add_message() - messages are pushed in from outside.
    2. process() - called periodically; decides whether to run a topic check.
    3. Trigger: >= 30 messages, OR > 2h since the last check with >= 10 msgs.
    4. _run_topic_check() - LLM identifies topics, returning topic -> indices.
    5. Topic similarity check (difflib, 90% threshold).
    6. Update topic_cache; topics without updates get no_update_checks += 1.
    7. Packaging condition: 3 consecutive no-update checks OR > 5 segments.
    8. _finalize_and_store_topic() - LLM summary -> write to the database.
    """

    def __init__(
        self,
        chat_id: str,
        agent_mind: AgentMindSubStage,
        db: BaseDatabase,
        check_interval: int = 60,
    ):
        self.chat_id = chat_id
        self.agent_mind = agent_mind
        self.db = db
        self.check_interval = check_interval

        # Message buffer.
        self.message_buffer: list[MessageItem] = []
        self.buffer_start_time: float = 0.0
        self.buffer_end_time: float = 0.0

        # Topic cache.
        self.topic_cache: dict[str, TopicCacheItem] = {}

        # Timing bookkeeping.
        self.last_topic_check_time: float = time.time()

        # Log prefix.
        self._log_prefix = f"[记忆-{chat_id[:8] if len(chat_id) > 8 else chat_id}]"

    def add_message(
        self, user_id: str, nickname: str, content: str, role: str = "user"
    ):
        """Push a message into the buffer; empty/blank content is ignored."""
        if not content or not content.strip():
            return

        msg = MessageItem(
            user_id=user_id,
            nickname=nickname,
            content=content.strip(),
            role=role,
        )
        self.message_buffer.append(msg)

        now = time.time()
        if not self.buffer_start_time:
            self.buffer_start_time = now
        self.buffer_end_time = now

    async def process(self):
        """Inspect the buffer and run a topic check if a trigger fires."""
        if not self.message_buffer:
            return

        current_time = time.time()
        message_count = len(self.message_buffer)
        time_since_last_check = current_time - self.last_topic_check_time

        # Trigger conditions (smaller thresholds than MaiBot, tuned for
        # private-chat volume).
        should_check = False

        # Condition 1: message count >= 30.
        if message_count >= 30:
            should_check = True
            logger.info(
                f"{self._log_prefix} 触发检查: 消息数量达到 {message_count} 条(阈值: 30)"
            )

        # Condition 2: > 2 hours since the last check AND >= 10 messages.
        elif time_since_last_check > 7200 and message_count >= 10:
            should_check = True
            logger.info(
                f"{self._log_prefix} 触发检查: 距上次 {time_since_last_check / 3600:.1f}h 且消息 {message_count} 条"
            )

        if should_check:
            await self._run_topic_check_and_update_cache()
            # Reset the buffer after a check.
            self.message_buffer.clear()
            self.buffer_start_time = 0.0
            self.buffer_end_time = 0.0
            self.last_topic_check_time = current_time

    async def _run_topic_check_and_update_cache(self):
        """Run the topic check and update the cache.

        Mirrors MaiBot's _run_topic_check_and_update_cache:
        1. require at least one assistant message
        2. build numbered messages
        3. LLM topic recognition
        4. similarity merging
        5. cache update
        6. packaging check
        """
        messages = self.message_buffer
        if not messages:
            return

        start_time = self.buffer_start_time or time.time()
        end_time = self.buffer_end_time or time.time()

        logger.info(f"{self._log_prefix} 开始话题检查 | 消息数: {len(messages)}")

        # 1. Require at least one assistant message in the batch.
        has_bot_message = any(m.role == "assistant" for m in messages)
        if not has_bot_message:
            logger.info(f"{self._log_prefix} 当前批次无 Bot 发言,跳过")
            return

        # 2. Build numbered messages and index lookups.
        numbered_lines: list[str] = []
        index_to_text: dict[int, str] = {}
        index_to_participants: dict[int, set[str]] = {}

        for idx, msg in enumerate(messages, start=1):
            line = msg.to_readable(idx)
            numbered_lines.append(line)
            index_to_text[idx] = msg.to_text()
            index_to_participants[idx] = {msg.nickname}

        # 3. LLM topic recognition (up to 3 attempts).
        existing_topics = list(self.topic_cache.keys())
        topic_to_indices: dict[str, list[int]] = {}
        success = False

        for attempt in range(1, 4):
            success, topic_to_indices = await self._analyze_topics_with_llm(
                numbered_lines, existing_topics
            )
            if success and topic_to_indices:
                if attempt > 1:
                    logger.info(f"{self._log_prefix} 话题识别第 {attempt} 次重试成功")
                break
            logger.warning(f"{self._log_prefix} 话题识别第 {attempt} 次失败")

        if not success or not topic_to_indices:
            logger.error(f"{self._log_prefix} 话题识别连续3次失败,放弃本次检查")
            return

        # 4. Similarity merge (same as MaiBot, 90% threshold).
        topic_mapping = self._build_topic_mapping(topic_to_indices, 0.9)
        if topic_mapping:
            new_topic_to_indices: dict[str, list[int]] = {}
            for new_topic, indices in topic_to_indices.items():
                if new_topic in topic_mapping:
                    historical_topic = topic_mapping[new_topic]
                    if historical_topic in new_topic_to_indices:
                        combined = list(
                            set(new_topic_to_indices[historical_topic] + indices)
                        )
                        new_topic_to_indices[historical_topic] = combined
                    else:
                        new_topic_to_indices[historical_topic] = indices
                else:
                    new_topic_to_indices[new_topic] = indices
            topic_to_indices = new_topic_to_indices

        # 5. Update the cache.
        updated_topics: set[str] = set()

        for topic, indices in topic_to_indices.items():
            if not indices:
                continue

            item = self.topic_cache.get(topic)
            if not item:
                item = TopicCacheItem(topic=topic)
                self.topic_cache[topic] = item

            topic_msg_texts: list[str] = []
            new_participants: set[str] = set()
            for idx in indices:
                msg_text = index_to_text.get(idx)
                if not msg_text:
                    continue
                topic_msg_texts.append(msg_text)
                new_participants.update(index_to_participants.get(idx, set()))

            if not topic_msg_texts:
                continue

            merged_text = "\n".join(topic_msg_texts)
            item.messages.append(merged_text)
            item.participants.update(new_participants)
            item.no_update_checks = 0
            updated_topics.add(topic)

        # Topics not updated this round get no_update_checks += 1.
        for topic, item in list(self.topic_cache.items()):
            if topic not in updated_topics:
                item.no_update_checks += 1

        # 6. Packaging check (same as MaiBot).
        topics_to_finalize: list[str] = []
        for topic, item in self.topic_cache.items():
            if item.no_update_checks >= 3:
                logger.info(f"{self._log_prefix} 话题[{topic}] 连续3次无新增,触发打包")
                topics_to_finalize.append(topic)
                continue
            if len(item.messages) > 5:
                logger.info(f"{self._log_prefix} 话题[{topic}] 消息超过5条,触发打包")
                topics_to_finalize.append(topic)

        for topic in topics_to_finalize:
            item = self.topic_cache.get(topic)
            if not item:
                continue
            try:
                await self._finalize_and_store_topic(
                    topic=topic,
                    item=item,
                    start_time=start_time,
                    end_time=end_time,
                )
            finally:
                # Always drop the topic from the cache, even if storage failed.
                self.topic_cache.pop(topic, None)

    async def _analyze_topics_with_llm(
        self,
        numbered_lines: list[str],
        existing_topics: list[str],
    ) -> tuple[bool, dict[str, list[int]]]:
        """Identify topics via LLM (mirrors MaiBot's _analyze_topics_with_llm).

        Returns:
            (success, topic -> list of 1-based message indices)
        """
        if not numbered_lines:
            return False, {}

        history_topics_block = (
            "\n".join(f"- {t}" for t in existing_topics)
            if existing_topics
            else "(当前无历史话题)"
        )
        messages_block = "\n".join(numbered_lines)

        prompt = TOPIC_ANALYSIS_PROMPT.format(
            history_topics_block=history_topics_block,
            messages_block=messages_block,
        )

        try:
            response = await self.agent_mind.call_simple(prompt=prompt, role="fast")

            logger.debug(f"{self._log_prefix} 话题识别响应: {response[:200]}...")

            result = extract_json_from_response(response)
            if not isinstance(result, list):
                logger.error(f"{self._log_prefix} 话题识别返回非列表: {result}")
                return False, {}

            topic_to_indices: dict[str, list[int]] = {}
            for item in result:
                if not isinstance(item, dict):
                    continue
                topic = item.get("topic")
                indices = item.get("message_indices") or item.get("messages") or []
                if not topic or not isinstance(topic, str):
                    continue
                if isinstance(indices, list):
                    # Coerce to positive ints, dropping anything malformed.
                    valid_indices: list[int] = []
                    for v in indices:
                        try:
                            iv = int(v)
                            if iv > 0:
                                valid_indices.append(iv)
                        except (TypeError, ValueError):
                            continue
                    if valid_indices:
                        topic_to_indices[topic] = valid_indices

            return True, topic_to_indices

        except Exception as e:
            logger.error(f"{self._log_prefix} 话题识别 LLM 调用失败: {e}")
            return False, {}

    def _find_most_similar_topic(
        self,
        new_topic: str,
        existing_topics: list[str],
        similarity_threshold: float = 0.9,
    ) -> tuple[str, float] | None:
        """Find the most similar historical topic (same as MaiBot).

        Returns (topic, similarity) when the best match meets the threshold,
        otherwise None.
        """
        if not existing_topics:
            return None

        best_match = None
        best_similarity = 0.0

        for existing_topic in existing_topics:
            similarity = difflib.SequenceMatcher(
                None, new_topic, existing_topic
            ).ratio()
            if similarity > best_similarity:
                best_similarity = similarity
                best_match = existing_topic

        if best_match and best_similarity >= similarity_threshold:
            return (best_match, best_similarity)
        return None

    def _build_topic_mapping(
        self,
        topic_to_indices: dict[str, list[int]],
        similarity_threshold: float = 0.9,
    ) -> dict[str, str]:
        """Map new topics onto similar historical topics (same as MaiBot)."""
        existing_topics_list = list(self.topic_cache.keys())
        topic_mapping: dict[str, str] = {}

        for new_topic in topic_to_indices.keys():
            if new_topic in existing_topics_list:
                continue
            result = self._find_most_similar_topic(
                new_topic, existing_topics_list, similarity_threshold
            )
            if result:
                historical_topic, similarity = result
                topic_mapping[new_topic] = historical_topic
                logger.info(
                    f"{self._log_prefix} 话题相似度: '{new_topic}' ≈ '{historical_topic}' ({similarity:.0%})"
                )

        return topic_mapping

    async def _finalize_and_store_topic(
        self,
        topic: str,
        item: TopicCacheItem,
        start_time: float,
        end_time: float,
    ):
        """Package a topic: summarize via LLM and persist (same as MaiBot)."""
        if not item.messages:
            logger.info(f"{self._log_prefix} 话题[{topic}] 无消息,跳过")
            return

        original_text = "\n".join(item.messages)

        logger.info(
            f"{self._log_prefix} 打包话题[{topic}] | 消息段数: {len(item.messages)}"
        )

        # LLM summary.
        success, keywords, summary, key_point = await self._compress_with_llm(
            original_text, topic
        )
        if not success:
            logger.warning(f"{self._log_prefix} 话题[{topic}] LLM 概括失败")
            return

        participants = list(item.participants)

        await self._store_to_database(
            start_time=start_time,
            end_time=end_time,
            original_text=original_text,
            participants=participants,
            theme=topic,
            keywords=keywords,
            summary=summary,
            key_point=key_point,
        )

        logger.info(
            f"{self._log_prefix} 话题[{topic}] 存储成功 | 参与者: {len(participants)}"
        )

    async def _compress_with_llm(
        self, original_text: str, topic: str
    ) -> tuple[bool, list[str], str, list[str]]:
        """Summarize a topic via LLM (mirrors MaiBot's _compress_with_llm).

        Returns:
            (success, keywords, summary, key_point); a missing/empty summary
            is treated as failure.
        """
        prompt = TOPIC_SUMMARY_PROMPT.format(topic=topic, original_text=original_text)

        try:
            response = await self.agent_mind.call_simple(prompt=prompt, role="fast")

            result = extract_json_from_response(response)
            if not isinstance(result, dict):
                logger.error(f"{self._log_prefix} 话题总结返回非字典: {result}")
                return False, [], "", []

            keywords = result.get("keywords", [])
            summary = result.get("summary", "")
            key_point = result.get("key_point", [])

            if not isinstance(keywords, list):
                keywords = []
            if not isinstance(summary, str) or not summary:
                return False, [], "", []
            if not isinstance(key_point, list):
                key_point = []

            return True, keywords, summary, key_point

        except Exception as e:
            logger.error(f"{self._log_prefix} 话题总结 LLM 调用失败: {e}")
            return False, [], "", []

    async def _store_to_database(
        self,
        start_time: float,
        end_time: float,
        original_text: str,
        participants: list[str],
        theme: str,
        keywords: list[str],
        summary: str,
        key_point: list[str] | None = None,
    ):
        """Persist one summarized topic as a MindSimChatMemory row."""
        try:
            record = MindSimChatMemory(
                chat_id=self.chat_id,
                start_time=start_time,
                end_time=end_time,
                original_text=original_text,
                participants=json.dumps(participants, ensure_ascii=False),
                theme=theme,
                keywords=json.dumps(keywords, ensure_ascii=False),
                summary=summary,
                key_point=(
                    json.dumps(key_point, ensure_ascii=False) if key_point else None
                ),
                count=0,
            )

            async with self.db.get_db() as session:
                async with session.begin():
                    session.add(record)

            logger.debug(f"{self._log_prefix} 成功存储聊天记忆到数据库")

        except Exception as e:
            logger.error(f"{self._log_prefix} 存储到数据库失败: {e}")
            import traceback

            traceback.print_exc()

    def get_topic_cache_snapshot(self) -> dict:
        """Return a serializable snapshot of the topic cache (for persistence)."""
        return {
            "last_topic_check_time": self.last_topic_check_time,
            "topics": {
                topic: {
                    "messages": item.messages,
                    "participants": list(item.participants),
                    "no_update_checks": item.no_update_checks,
                }
                for topic, item in self.topic_cache.items()
            },
            "buffer": {
                "messages": [
                    {
                        "user_id": m.user_id,
                        "nickname": m.nickname,
                        "content": m.content,
                        "role": m.role,
                        "timestamp": m.timestamp,
                    }
                    for m in self.message_buffer
                ],
                "start_time": self.buffer_start_time,
                "end_time": self.buffer_end_time,
            },
        }

    def load_from_snapshot(self, data: dict):
        """Restore state from a snapshot produced by get_topic_cache_snapshot."""
        if not data:
            return

        self.last_topic_check_time = data.get(
            "last_topic_check_time", self.last_topic_check_time
        )

        # Restore the topic cache.
        topics_data = data.get("topics", {})
        for topic, payload in topics_data.items():
            self.topic_cache[topic] = TopicCacheItem(
                topic=topic,
                messages=payload.get("messages", []),
                participants=set(payload.get("participants", [])),
                no_update_checks=payload.get("no_update_checks", 0),
            )

        # Restore the message buffer.
        buffer_data = data.get("buffer", {})
        buffer_messages = buffer_data.get("messages", [])
        for m in buffer_messages:
            self.message_buffer.append(
                MessageItem(
                    user_id=m.get("user_id", ""),
                    nickname=m.get("nickname", ""),
                    content=m.get("content", ""),
                    role=m.get("role", "user"),
                    timestamp=m.get("timestamp", time.time()),
                )
            )
        self.buffer_start_time = buffer_data.get("start_time", 0.0)
        self.buffer_end_time = buffer_data.get("end_time", 0.0)

        if self.topic_cache or self.message_buffer:
            logger.info(
                f"{self._log_prefix} 恢复缓存: {len(self.topic_cache)} 个话题, "
                f"{len(self.message_buffer)} 条消息"
            )


# --- astrbot/core/mind_sim/memory/manager.py ---
"""MindSim memory manager - unified coordination entry.

One instance per chat_id (singleton), coordinating chat-summary memory and
person-memory updates.
"""

import asyncio

from astrbot.core import logger
from astrbot.core.db import BaseDatabase
from astrbot.core.mind_sim.AgentMindSubStage import AgentMindSubStage
from astrbot.core.mind_sim.context import MindContext

from .chat_summarizer import ChatSummarizer
from .person_memory import PersonMemoryManager


class MemoryManager:
    """Unified entry point for the memory system."""

    # chat_id -> singleton instance.
    _instances: dict[str, "MemoryManager"] = {}

    def __init__(self, chat_id: str, mind_ctx: MindContext, db: BaseDatabase):
        self.chat_id = chat_id
        self.mind_ctx = mind_ctx
        self.db = db
        self._agent_mind = self._create_agent_mind()
        self.chat_summarizer = ChatSummarizer(chat_id, self._agent_mind, db)
        self.person_memory = PersonMemoryManager(self._agent_mind, db)
        self._periodic_task: asyncio.Task | None = None
        self._running = False

    def _create_agent_mind(self) -> AgentMindSubStage:
        """Create the AgentMindSubStage instance using MindContext."""
        # NOTE(review): this reads "robot_config" from personality_config,
        # although MindContext also has a separate robot_config field —
        # confirm which one is intended.
        persona_config = self.mind_ctx.personality_config.get("robot_config", {})
        return AgentMindSubStage.create_for_brain(
            event=self.mind_ctx.event,
            plugin_context=self.mind_ctx.plugin_context,
            persona_config=persona_config,
        )

    async def start(self):
        """Start the periodic check task (idempotent)."""
        if self._running:
            return
        self._running = True
        self._periodic_task = asyncio.create_task(self._periodic_loop())
        logger.info(f"[记忆管理-{self.chat_id[:8]}] 已启动周期性检查")

    async def stop(self):
        """Stop and await the periodic check task."""
        self._running = False
        if self._periodic_task:
            self._periodic_task.cancel()
            try:
                await self._periodic_task
            except asyncio.CancelledError:
                pass
            self._periodic_task = None
        logger.info(f"[记忆管理-{self.chat_id[:8]}] 已停止")
logger.info(f"[记忆管理-{self.chat_id[:8]}] 已停止") + + async def on_message( + self, user_id: str, nickname: str, content: str, role: str = "user" + ): + """收到消息时调用(用户消息和AI回复都要推入)""" + self.chat_summarizer.add_message(user_id, nickname, content, role) + + async def on_conversation_end( + self, user_id: str, nickname: str, conversation_text: str + ): + """对话结束时调用 + + 1. 立即执行一次话题检查 + 2. 更新人物记忆 + """ + try: + # 立即执行话题检查(不等周期) + await self.chat_summarizer.process() + except Exception as e: + logger.error(f"[记忆管理] 话题检查失败: {e}") + + try: + # 更新人物记忆 + await self.person_memory.update_person_memory( + self.chat_id, user_id, nickname, conversation_text + ) + except Exception as e: + logger.error(f"[记忆管理] 人物记忆更新失败: {e}") + + async def _periodic_loop(self): + """周期性检查循环(60秒间隔)""" + try: + while self._running: + try: + await self.chat_summarizer.process() + except Exception as e: + logger.error(f"[记忆管理] 周期检查出错: {e}") + await asyncio.sleep(self.chat_summarizer.check_interval) + except asyncio.CancelledError: + pass + + def get_snapshot(self) -> dict: + """获取状态快照(用于持久化)""" + return self.chat_summarizer.get_snapshot() + + def restore_from_snapshot(self, data: dict): + """从快照恢复状态""" + self.chat_summarizer.restore_from_snapshot(data) + + @classmethod + def get_or_create( + cls, chat_id: str, mind_ctx: MindContext, db: BaseDatabase + ) -> "MemoryManager": + """获取或创建实例(单例 per chat_id)""" + if chat_id not in cls._instances: + cls._instances[chat_id] = MemoryManager(chat_id, mind_ctx, db) + logger.info(f"[记忆管理] 创建新实例: {chat_id[:8]}") + return cls._instances[chat_id] + + @classmethod + def remove_instance(cls, chat_id: str): + """移除实例""" + inst = cls._instances.pop(chat_id, None) + if inst: + inst._running = False + + @classmethod + def get_all_instances(cls) -> dict[str, "MemoryManager"]: + """获取所有实例""" + return cls._instances diff --git a/astrbot/core/mind_sim/memory/models.py b/astrbot/core/mind_sim/memory/models.py new file mode 100644 index 0000000000..ed635adc19 --- /dev/null +++ 
"""MindSim 记忆系统数据库模型"""

from sqlmodel import Field, SQLModel, Text

from astrbot.core.db.po import TimestampMixin


class MindSimChatMemory(TimestampMixin, SQLModel, table=True):
    """Conversation-memory table: one row per summarised topic.

    NOTE: list-valued fields (participants, keywords, key_point) are stored
    as JSON-encoded strings, not native JSON columns — callers must
    json.dumps/json.loads them.
    """

    __tablename__: str = "mindsim_chat_memories"

    id: int | None = Field(
        default=None,
        primary_key=True,
        sa_column_kwargs={"autoincrement": True},
    )
    chat_id: str = Field(nullable=False, index=True)
    """对话标识(unified_msg_origin)"""
    start_time: float = Field(nullable=False)
    """话题起始时间戳"""
    end_time: float = Field(nullable=False)
    """话题结束时间戳"""
    original_text: str = Field(default="", sa_type=Text)
    """原始聊天记录文本"""
    participants: str = Field(default="[]")
    """参与者昵称列表(JSON)"""
    theme: str = Field(default="")
    """主题/话题标题"""
    keywords: str = Field(default="[]")
    """关键词(JSON list)"""
    summary: str = Field(default="", sa_type=Text)
    """概括(50-200字)"""
    key_point: str | None = Field(default=None, sa_type=Text)
    """关键信息点(JSON list)"""
    count: int = Field(default=0)
    """被检索次数"""


class MindSimPersonMemory(TimestampMixin, SQLModel, table=True):
    """Person-memory table: the bot's impression of one user in one chat.

    NOTE: traits and memorable_events are JSON-encoded strings; impression
    and relationship are plain text.
    """

    __tablename__: str = "mindsim_person_memories"

    id: int | None = Field(
        default=None,
        primary_key=True,
        sa_column_kwargs={"autoincrement": True},
    )
    chat_id: str = Field(nullable=False, index=True)
    """来源对话(unified_msg_origin)"""
    user_id: str = Field(nullable=False, index=True)
    """用户ID"""
    nickname: str = Field(default="")
    """昵称"""
    impression: str = Field(default="", sa_type=Text)
    """印象描述"""
    traits: str | None = Field(default=None)
    """性格特点(JSON list)"""
    relationship: str | None = Field(default=None)
    """关系描述"""
    memorable_events: str | None = Field(default=None, sa_type=Text)
    """值得记忆的事件(JSON list)"""
b/astrbot/core/mind_sim/memory/person_memory.py @@ -0,0 +1,166 @@ +"""MindSim 人物记忆管理 + +从对话中提取人物印象并持久化更新。 +""" + +import json + +from astrbot.core import logger +from astrbot.core.db import BaseDatabase +from astrbot.core.mind_sim.AgentMindSubStage import AgentMindSubStage + +from .models import MindSimPersonMemory +from .prompts import PERSON_IMPRESSION_PROMPT +from .utils import extract_json_from_response + + +class PersonMemoryManager: + """人物记忆管理器""" + + def __init__(self, agent_mind: AgentMindSubStage, db: BaseDatabase): + self.agent_mind = agent_mind + self.db = db + + async def update_person_memory( + self, + chat_id: str, + user_id: str, + nickname: str, + conversation_text: str, + ): + """对话结束后,提取人物印象并更新 + + Args: + chat_id: 对话标识 + user_id: 用户ID + nickname: 用户昵称 + conversation_text: 本次对话文本 + """ + if not conversation_text or not conversation_text.strip(): + return + + log_prefix = f"[人物记忆-{nickname}]" + + try: + # 1. 查询已有记忆 + existing = await self._get_existing_memory(chat_id, user_id) + existing_impression = "(暂无已有印象)" + if existing: + existing_impression = ( + f"印象:{existing.impression}\n" + f"性格特点:{existing.traits or '未知'}\n" + f"关系:{existing.relationship or '未知'}\n" + f"记忆事件:{existing.memorable_events or '无'}" + ) + + # 2. LLM 分析本次对话 + prompt = PERSON_IMPRESSION_PROMPT.format( + nickname=nickname, + user_id=user_id, + existing_impression=existing_impression, + conversation_text=conversation_text[-3000:], # 限制长度 + ) + + response = await self.agent_mind.call_simple(prompt, role="fast") + result = extract_json_from_response(response) + + if not result or not isinstance(result, dict): + logger.warning(f"{log_prefix} LLM 返回无效 JSON,跳过更新") + return + + impression = result.get("impression", "") + traits = result.get("traits", []) + relationship = result.get("relationship", "") + memorable_events = result.get("memorable_events", []) + + if not impression: + logger.warning(f"{log_prefix} 未提取到有效印象,跳过") + return + + # 3. 
保存/更新到数据库 + await self._save_person_memory( + chat_id=chat_id, + user_id=user_id, + nickname=nickname, + impression=impression, + traits=json.dumps(traits, ensure_ascii=False) if traits else None, + relationship=relationship or None, + memorable_events=( + json.dumps(memorable_events, ensure_ascii=False) + if memorable_events + else None + ), + existing=existing, + ) + + logger.info( + f"{log_prefix} 人物记忆已更新 | " + f"特点: {len(traits)} 个 | 事件: {len(memorable_events)} 个" + ) + + except Exception as e: + logger.error(f"{log_prefix} 更新人物记忆失败: {e}") + + async def _get_existing_memory( + self, chat_id: str, user_id: str + ) -> MindSimPersonMemory | None: + """查询已有的人物记忆""" + try: + from sqlmodel import select + + async with self.db.get_db() as session: + stmt = select(MindSimPersonMemory).where( + MindSimPersonMemory.chat_id == chat_id, + MindSimPersonMemory.user_id == user_id, + ) + result = await session.execute(stmt) + return result.scalar_one_or_none() + except Exception as e: + logger.error(f"[人物记忆] 查询失败: {e}") + return None + + async def _save_person_memory( + self, + chat_id: str, + user_id: str, + nickname: str, + impression: str, + traits: str | None, + relationship: str | None, + memorable_events: str | None, + existing: MindSimPersonMemory | None, + ): + """保存人物记忆到数据库""" + try: + from sqlmodel import select + + async with self.db.get_db() as session: + async with session.begin(): + if existing: + # 更新已有记录 + stmt = select(MindSimPersonMemory).where( + MindSimPersonMemory.id == existing.id + ) + result = await session.execute(stmt) + record = result.scalar_one_or_none() + if record: + record.nickname = nickname + record.impression = impression + record.traits = traits + record.relationship = relationship + record.memorable_events = memorable_events + else: + # 创建新记录 + record = MindSimPersonMemory( + chat_id=chat_id, + user_id=user_id, + nickname=nickname, + impression=impression, + traits=traits, + relationship=relationship, + memorable_events=memorable_events, + 
) + session.add(record) + except Exception as e: + logger.error(f"[人物记忆] 保存失败: {e}") + raise diff --git a/astrbot/core/mind_sim/memory/prompts.py b/astrbot/core/mind_sim/memory/prompts.py new file mode 100644 index 0000000000..d23598b16e --- /dev/null +++ b/astrbot/core/mind_sim/memory/prompts.py @@ -0,0 +1,87 @@ +"""MindSim 记忆系统 LLM 提示词 + +与 MaiBot memory_system 提示词保持一致。 +""" + +# 话题识别提示词(与 MaiBot hippo_topic_analysis_prompt 一致) +TOPIC_ANALYSIS_PROMPT = """【历史话题标题列表】(仅标题,不含具体内容): +{history_topics_block} +【历史话题标题列表结束】 + +【本次聊天记录】(每条消息前有编号,用于后续引用): +{messages_block} +【本次聊天记录结束】 + +请完成以下任务: +**识别话题** +1. 识别【本次聊天记录】中正在进行的一个或多个话题; +2. 【本次聊天记录】的中的消息可能与历史话题有关,也可能毫无关联。 +2. 判断【历史话题标题列表】中的话题是否在【本次聊天记录】中出现,如果出现,则直接使用该历史话题标题字符串; + +**选取消息** +1. 对于每个话题(新话题或历史话题),从上述带编号的消息中选出与该话题强相关的消息编号列表; +2. 每个话题用一句话清晰地描述正在发生的事件,必须包含时间(大致即可)、人物、主要事件和主题,保证精准且有区分度; + +请先输出一段简短思考,说明有什么话题,哪些是不包含在历史话题中的,哪些是包含在历史话题中的,并说明为什么; +然后严格以 JSON 格式输出【本次聊天记录】中涉及的话题,格式如下: +[ + {{ + "topic": "话题", + "message_indices": [1, 2, 5] + }}, + ... +] +""" + +# 话题总结提示词(与 MaiBot hippo_topic_summary_prompt 一致) +TOPIC_SUMMARY_PROMPT = """ +请基于以下话题,对聊天记录片段进行概括,提取以下信息: + +**话题**:{topic} + +**要求**: +1. 关键词:提取与话题相关的关键词,用列表形式返回(3-10个关键词) +2. 概括:对这段话的平文本概括(50-200字),要求: + - 仔细地转述发生的事件和聊天内容; + - 可以适当摘取聊天记录中的原文; + - 重点突出事件的发展过程和结果; + - 围绕话题这个中心进行概括。 +3. 关键信息:提取话题中的关键信息点,用列表形式返回(3-8个关键信息点),每个关键信息点应该简洁明了。 + +请以JSON格式返回,格式如下: +{{ + "keywords": ["关键词1", "关键词2", ...], + "summary": "概括内容", + "key_point": ["关键信息1", "关键信息2", ...] +}} + +聊天记录: +{original_text} + +请直接返回JSON,不要包含其他内容。 +""" + +# 人物印象提取提示词 +PERSON_IMPRESSION_PROMPT = """请根据以下对话内容,分析用户"{nickname}"(ID: {user_id})的特征。 + +**已有印象**: +{existing_impression} + +**本次对话内容**: +{conversation_text} + +请综合已有印象和本次对话,更新对该用户的认知,输出JSON格式: +{{ + "impression": "对该用户的整体印象描述(100-300字,包含性格、说话风格、兴趣爱好等)", + "traits": ["性格特点1", "性格特点2", ...], + "relationship": "与我的关系描述(如:朋友、熟人、陌生人等)", + "memorable_events": ["值得记忆的事件1", "值得记忆的事件2", ...] +}} + +要求: +1. 如果已有印象不为空,请在已有基础上更新和补充,而不是完全覆盖; +2. 
"""MindSim 记忆系统工具函数"""

import json
import re
from typing import Any


def extract_json_from_response(response: str) -> Any:
    """Extract a JSON payload from an LLM response.

    Supported shapes:
    - a ```json ... ``` fenced code block
    - a bare JSON object ``{...}`` or array ``[...]`` embedded in text
    - raw JSON, possibly wrapped in stray markdown fence markers

    Args:
        response: raw LLM output text.

    Returns:
        The parsed JSON value, or ``None`` when nothing parseable is found.
    """
    if not response:
        return None

    # Prefer an explicit ```json fenced block when one is present.
    matches = re.findall(r"```json\s*(.*?)\s*```", response, re.DOTALL)
    if matches:
        json_str = matches[0].strip()
    else:
        # Locate the OUTERMOST JSON structure. Pick whichever opener
        # ({ or [) appears first: the previous array-first scan turned an
        # object containing arrays (e.g. '{"keywords": ["a"]}') into just
        # the inner array, silently dropping the rest of the payload.
        candidates = []
        for opener, closer in (("{", "}"), ("[", "]")):
            start = response.find(opener)
            end = response.rfind(closer)
            if start != -1 and end > start:
                candidates.append((start, response[start : end + 1].strip()))
        if candidates:
            # min() keys on the start index: earliest opener wins.
            json_str = min(candidates)[1]
        else:
            # Last resort: strip markdown fence markers and try the rest.
            json_str = response.strip()
            json_str = re.sub(r"^```json\s*", "", json_str, flags=re.MULTILINE)
            json_str = re.sub(r"^```\s*", "", json_str, flags=re.MULTILINE)
            json_str = json_str.strip()

    try:
        return json.loads(json_str)
    except json.JSONDecodeError:
        # Repair common LLM output errors (trailing commas etc.) and retry.
        json_str = re.sub(r",\s*([}\]])", r"\1", json_str)
        try:
            return json.loads(json_str)
        except json.JSONDecodeError:
            return None


def format_timestamp(ts: float) -> str:
    """Format a UNIX timestamp as ``YYYY-MM-DD HH:MM:SS``.

    NOTE(review): deliberately renders UTC, not local time — confirm that
    callers expect UTC wall-clock strings.
    """
    from datetime import datetime, timezone

    dt = datetime.fromtimestamp(ts, tz=timezone.utc)
    return dt.strftime("%Y-%m-%d %H:%M:%S")
b/astrbot/core/mind_sim/messages.py @@ -0,0 +1,205 @@ +"""mind_sim 内部消息类型定义 + +消息流向: +- 外部 → mind_sim: IncomingUserMessage +- mind_sim → Action: ActionStartMsg, ActionSendMsg, ActionStopMsg +- Action → mind_sim: ActionStateUpdate, ActionOutput +""" + +from dataclasses import dataclass, field +from enum import Enum +from typing import Any + + +class MindMessage: + """mind_sim 内部消息基类""" + + pass + + +@dataclass +class ActionState: + """动作状态快照 - 主思考读取这个来了解动作情况""" + + action_name: str + status: str = ( + "idle" # "idle" | "running" | "paused" | "completed" | "error" | "stopped" + ) + progress: str | None = None # 人类可读的进度描述 + data: dict = field(default_factory=dict) # 动作自定义数据 + prompt_contribution: str | None = None # 贡献给主思考的动态提示词 + can_receive: bool = True # 是否能接收主思考的消息 + error: str | None = None # 错误信息 + created_at: float = 0 + updated_at: float = 0 + + +@dataclass +class ActionStartMsg(MindMessage): + """主思考 → 动作:启动""" + + action_name: str + params: dict = field(default_factory=dict) + + +@dataclass +class ActionSendMsg(MindMessage): + """主思考 → 动作:发送消息(影响运行中的动作)""" + + action_name: str + message: str + data: dict = field(default_factory=dict) + + +@dataclass +class ActionStopMsg(MindMessage): + """主思考 → 动作:停止""" + + action_name: str + reason: str = "" + + +@dataclass +class ActionStateUpdate(MindMessage): + """动作 → mind_sim:状态更新""" + + action_name: str + state: ActionState + + +@dataclass +class ActionOutput(MindMessage): + """动作 → mind_sim:产出""" + + action_name: str + type: ( + str # "reply" | "typing" | "internal" | "error" | "request_think" | "completed" + ) + content: str | None = None + metadata: dict = field(default_factory=dict) + prompt: str | None = None # 触发思考的原因(用于 request_think) + + +@dataclass +class IncomingUserMessage(MindMessage): + """外部 → mind_sim:收到用户消息""" + + sender_id: str + sender_name: str + content: str + is_private: bool + timestamp: float + message_obj: Any = None # 原始消息对象 + + +@dataclass +class Decision: + """主思考的决策""" + + action: str # 
"START" | "SEND" | "STOP" | "REPLY" | "THINK" | "WAIT" + target: str | None # 目标动作名称 + message: str | None # 消息内容 + reasoning: str | None = None # 决策理由 + params: dict = field(default_factory=dict) + + +class MindEventType(Enum): + """mind_sim 对外输出的事件类型""" + + REPLY = "reply" # 回复用户 + TYPING = "typing" # 正在输入 + THINKING = "thinking" # 思考过程 + ACTION_START = "action_start" # 动作开始 + ACTION_OUTPUT = "action_output" # 动作产出(reply/typing/error 等) + ACTION_END = "action_end" # 动作结束 + INTERNAL = "internal" # 内部状态变化 + TRIGGER_THINK = "trigger_think" # 触发主思考(动作完成/等待结束后请求再次思考) + PIPELINE_YIELD = "pipeline_yield" # 请求 pipeline 框架 yield(让 RespondStage 发送 event.result) + END = "end" # 思考结束(事件流结束) + ERROR = "error" # 错误 + + +@dataclass +class MindEvent: + """mind_sim 对外输出的事件""" + + type: MindEventType + data: dict = field(default_factory=dict) + + @classmethod + def reply(cls, text: str, metadata: dict | None = None) -> "MindEvent": + return cls(type=MindEventType.REPLY, data={"text": text, **(metadata or {})}) + + @classmethod + def typing(cls) -> "MindEvent": + return cls(type=MindEventType.TYPING) + + @classmethod + def thinking(cls, content: str) -> "MindEvent": + return cls(type=MindEventType.THINKING, data={"content": content}) + + @classmethod + def action_start(cls, action_name: str, params: dict) -> "MindEvent": + return cls( + type=MindEventType.ACTION_START, + data={"action": action_name, "params": params}, + ) + + @classmethod + def action_end(cls, action_name: str, result: dict | None = None) -> "MindEvent": + return cls( + type=MindEventType.ACTION_END, + data={"action": action_name, "result": result or {}}, + ) + + @classmethod + def action_output( + cls, + action_name: str, + output_type: str, + content: str, + metadata: dict | None = None, + ) -> "MindEvent": + """动作产出事件(reply/typing/error 等)""" + return cls( + type=MindEventType.ACTION_OUTPUT, + data={ + "action": action_name, + "output_type": output_type, + "content": content, + **(metadata or {}), + }, + ) + + 
@classmethod + def trigger_think(cls, reason: str = "") -> "MindEvent": + """触发主思考事件(动作完成后请求再次思考)""" + return cls(type=MindEventType.TRIGGER_THINK, data={"reason": reason}) + + @classmethod + def end(cls, reason: str = "") -> "MindEvent": + """思考结束事件""" + return cls(type=MindEventType.END, data={"reason": reason}) + + @classmethod + def pipeline_yield(cls, done_event: Any = None) -> "MindEvent": + """请求 pipeline yield 事件 + + AgentMindSubStage.call() 设置好 event.result 后发出此事件, + InternalMindSubStage 收到后 yield 给 pipeline 框架, + RespondStage 处理完后 yield 返回,通知 done_event。 + + Args: + done_event: asyncio.Event,pipeline yield 完成后 set() + """ + return cls( + type=MindEventType.PIPELINE_YIELD, + data={"done_event": done_event}, + ) + + @classmethod + def error(cls, message: str, metadata: dict | None = None) -> "MindEvent": + """错误事件""" + return cls( + type=MindEventType.ERROR, data={"message": message, **(metadata or {})} + ) diff --git a/astrbot/core/mind_sim/private/__init__.py b/astrbot/core/mind_sim/private/__init__.py new file mode 100644 index 0000000000..bfdcb52766 --- /dev/null +++ b/astrbot/core/mind_sim/private/__init__.py @@ -0,0 +1,26 @@ +"""MindSim 私聊模块 + +包含私聊场景下的主思考模块和相关工具。 +""" + +from .brain import PrivateBrain +from .prompts import ( + ACTION_OPTIONS_TEMPLATE, + DECISION_FORMAT_PROMPT, + MAIN_THINKING_SYSTEM_PROMPT, + build_action_options_prompt, + build_action_states_prompt, + build_history_prompt, + build_main_thinking_prompt, +) + +__all__ = [ + "PrivateBrain", + "DECISION_FORMAT_PROMPT", + "ACTION_OPTIONS_TEMPLATE", + "MAIN_THINKING_SYSTEM_PROMPT", + "build_action_options_prompt", + "build_action_states_prompt", + "build_history_prompt", + "build_main_thinking_prompt", +] diff --git a/astrbot/core/mind_sim/private/actions/EndConversation.py b/astrbot/core/mind_sim/private/actions/EndConversation.py new file mode 100644 index 0000000000..5a94238b4f --- /dev/null +++ b/astrbot/core/mind_sim/private/actions/EndConversation.py @@ -0,0 +1,76 @@ +from 
collections.abc import AsyncGenerator + +from astrbot.core.mind_sim import Action, ActionOutput + + +class EndConversationAction(Action): + """结束对话动作 - 停止所有动作并退出 + + 适用于: + - 结束当前对话场景 + - 停止所有正在执行的动作 + - 发送 END 事件退出事件流 + + **注意:此动作会停止整个思考流程** + """ + + name = "end_conversation" + description = """结束对话动作 - 退出当前对话场景 + +**重要:此动作会停止所有动作并退出思考流程** +适用于: +- 结束当前对话 +- 清理所有正在进行的动作 +- 完全退出当前思考流程 +如果你想结束对话,请输入为什么想结束对话 +参数: {"reason": "向用户说的结束原因",reply:"根据你的性格特征结束对话回复给用户的内容"(可选)} +""" + fixed_prompt = "正在结束对话" + priority = -200 # 最低优先级 + + usage_guide = """ + - 适用于需要完全结束对话的场景 + - 会停止所有正在运行的动作 + - 退出后不会再触发任何思考 + """ + + def get_completion_output(self) -> ActionOutput | None: + """重写完成行为:发送 END 类型""" + # END 类型是特殊的事件,会直接关闭事件流 + return ActionOutput( + action_name=self.instance_id or self.name, + type="end", + content="对话已结束", + ) + + async def run(self, params: dict) -> AsyncGenerator[ActionOutput, None]: + reason = params.get("reason", "用户主动结束") + + self.update_state( + progress="结束对话中", + prompt_contribution=f"正在结束对话: {reason}", + ) + + # 先停止所有其他正在运行的动作 + # 注意:这里需要通过 executor 来停止,但 Action 本身无法直接访问 executor + # 所以通过发送消息的方式来处理 + relpy = params.get("reply", None) + + if relpy: + # yield ActionOutput( #后续编辑使用,应该传入事件使用 + # action_name=self.instance_id or self.name, + # type="reply", + # content=f"{relpy}", + # metadata={"no_think": True}, # 标记不触发重新思考 + # ) + yield ActionOutput( + action_name=self.instance_id or self.name, + type="noop", + content="对话已结束", + ) + else: + yield ActionOutput( + action_name=self.instance_id or self.name, + type="noop", + content="对话已结束", + ) diff --git a/astrbot/core/mind_sim/private/actions/NoOp.py b/astrbot/core/mind_sim/private/actions/NoOp.py new file mode 100644 index 0000000000..7960714d3a --- /dev/null +++ b/astrbot/core/mind_sim/private/actions/NoOp.py @@ -0,0 +1,60 @@ +from collections.abc import AsyncGenerator + +from astrbot.core.mind_sim import Action, ActionOutput + + +class NoOpAction(Action): + """空动作 - 什么都不做 + + 适用于: + - 跳过当前思考轮次,不产生任何输出 
+ - 保持静默状态一段时间 + + **完成后不会触发重新思考** + """ + + name = "noop" + description = """空动作 - 什么都不做 + +**重要:完成后不会触发重新思考** + +适用于: +- 保持静默状态 +- 跳过本次思考轮次 +- 临时沉默 + +参数: {} +""" + fixed_prompt = "无操作" + priority = -100 # 最低优先级 + + usage_guide = """ + - 适用于需要暂时停止但不离场的情况 + - 适用于占据思考轮次但不产生回复 + - 不会触发重新思考,保持当前状态 + """ + + def get_completion_output(self) -> ActionOutput | None: + """重写完成行为:不触发重新思考""" + return ActionOutput( + action_name=self.instance_id or self.name, + type="completed_no_think", + content="", + ) + + async def on_complete(self, params: dict) -> None: + """完成后添加临时提示词""" + self.add_temp_prompt("刚刚选择了静默,没有要回复的内容", rounds=3) + + async def run(self, params: dict) -> AsyncGenerator[ActionOutput, None]: + self.update_state( + progress="无操作", + prompt_contribution="当前选择静默", + ) + + # 什么也不做,直接完成 + yield ActionOutput( + action_name=self.instance_id or self.name, + type="noop", + content="", + ) diff --git a/astrbot/core/mind_sim/private/actions/Reply/Reply.py b/astrbot/core/mind_sim/private/actions/Reply/Reply.py new file mode 100644 index 0000000000..0a580044b5 --- /dev/null +++ b/astrbot/core/mind_sim/private/actions/Reply/Reply.py @@ -0,0 +1,268 @@ +"""回复动作 - 调用 LLM 生成并发送消息 + +支持根据主思考传入的参数生成不同风格的回复。 + +职责: +1. 调用 LLM 生成回复(通过 AgentMindSubStage.call,完整流程) +2. 发送消息到用户(AgentMindSubStage 自动处理) +3. 
保存 AI 回复到对话历史 +""" + +import json +from collections.abc import AsyncGenerator +from typing import TYPE_CHECKING + +from astrbot.core import logger +from astrbot.core.mind_sim import Action, ActionOutput +from astrbot.core.mind_sim.private.actions.Reply.reply_prompts import ( + build_reply_prompt, +) +from astrbot.core.mind_sim.private.prompts import ( + build_action_states_prompt, + build_history_prompt, + build_temp_prompts_section, +) + +if TYPE_CHECKING: + pass + + +class ReplyAction(Action): + """回复动作 - 调用 LLM 生成并发送消息 + + 支持根据主思考传入的参数生成不同风格的回复。 + """ + + name = "reply" + description = """回复动作 - 调用 LLM 生成回复并发送 不要回复太频繁,像真人一样,能拒绝回复 +**回复完成后会自动触发下一轮思考** +**自动调用 LLM 生成回复内容**: +- reply_type: 正常回复(normal)/追加回复(append) +- reply_guidance: 给指导方向,什么话题,传入给回复器的知识,内容等等,这是给另一个大模型进行专门回复的参考与指导 +- target: 追加回复时,要补充的原发言内容(仅 append 类型需要) +- reason: 追加回复时,补充的原因(仅 append 类型需要) +参数: {"reply_type": "normal", "reply_guidance": "就今天天气很好进行回复,今天天气17°"} +追加示例: {"reply_type": "append", "reply_guidance": "", "target": "今天天气不错", "reason": "忘了说温度"} +""" + fixed_prompt = "正在生成回复" + priority = 100 + + usage_guide = """ + - 适用于需要 AI 生成回复的场景 + - normal: 正常回复,根据聊天内容口语化回复 + - append: 追加回复,补充说明自己刚刚的发言,需要传入 target 和 reason + - 主思考传入 reply_guidance 指导回复方向 + """ + + async def on_complete(self, params: dict) -> None: + """完成后添加临时提示词""" + text = self._state.data.get("reply_text", "") + if text: + self.add_temp_prompt( + f"已回复: {text} 提示:距离0秒的这条语句 则这是回复后调用思考,可以选择只等待,或者追加回复,避免频繁回复,不要回复的太频繁", + rounds=5, + ) + + async def run(self, params: dict) -> AsyncGenerator[ActionOutput, None]: + """运行回复动作 + + 流程: + 1. 获取对话历史和上下文 + 2. 调用 LLM 生成回复 + 3. 发送消息到用户 + 4. 
保存回复到历史 + """ + reply_type = params.get("reply_type", "normal") + reply_guidance = params.get("reply_guidance", "") + target = params.get("target", "") + reason = params.get("reason", "") + + self.update_state( + progress="准备生成回复", + prompt_contribution=f"正在生成 {reply_type} 风格回复", + data={"reply_type": reply_type}, + ) + + # 获取对话历史 + dialogue_history = await self._get_dialogue_history_formatted() + + # 获取临时提醒 + temp_prompts_str = self._build_temp_prompts_formatted() + + # 获取运行中的动作实例状态 + running_actions_str = self._build_running_states_formatted() + + # 构建提示词 + prompt = build_reply_prompt( + reply_type=reply_type, + reply_guidance=reply_guidance, + ctx=self.ctx, + dialogue_history=dialogue_history, + target=target, + reason=reason, + temp_prompts=temp_prompts_str, + running_actions=running_actions_str, + ) + + ORANGE = "\033[38;5;214m" + RESET = "\033[0m" + logger.debug(f"{ORANGE}[ReplyAction] 回复提示词: {prompt}{RESET}") + + # 调用 LLM 生成回复(与 internal.py 架构完全一致) + # 会自动处理:typing 状态、事件钩子、会话锁、流式/普通响应、保存历史 + self.update_state(progress="调用 LLM 生成回复中") + reply_text = "" + + try: + # send_to_platform=True:通过 PIPELINE_YIELD 桥接 pipeline 框架发送消息 + # 自动处理 typing、hook、session lock + reply_text = await self.llm.call( + prompt=prompt, + role="reply", + send_to_platform=True, + ) + + except Exception as e: + self.update_state( + progress="LLM 调用失败", + prompt_contribution=f"生成回复失败: {e}", + ) + yield ActionOutput( + action_name=self.instance_id or self.name, + type="reply", + content="抱歉,生成回复时出错了", + ) + return + + # 清理回复内容 + reply_text = self._clean_response(reply_text) + + if not reply_text: + self.update_state(progress="回复为空", prompt_contribution="LLM 返回空内容") + # return + else: + self.update_state( + progress="发送回复", + prompt_contribution=f"回复内容: {reply_text[:50]}...", + data={"reply_text": reply_text}, + ) + + # 保存回复到历史(call 已通过 event.send 发送,这里只保存历史) + await self._save_reply_to_history(reply_text) + + # 通知主思考回复已完成,触发重新思考 + yield ActionOutput( + action_name=self.instance_id or 
self.name, + type="completed", + prompt=f"已回复: {reply_text}", + content="", + ) + + self.update_state( + status="completed", + progress="回复完成", + prompt_contribution=None, + ) + + async def _save_reply_to_history(self, text: str) -> None: + """保存 AI 回复到对话历史 + + 从 conv_manager 读取当前 history,追加 assistant 消息,然后更新。 + """ + if not self.ctx.conv_manager or not self.ctx.conversation_id: + logger.debug( + "[ReplyAction] 无法保存历史:缺少 conv_manager 或 conversation_id" + ) + return + + try: + conv = await self.ctx.conv_manager.get_conversation( + self.ctx.unified_msg_origin, self.ctx.conversation_id + ) + if not conv: + logger.debug("[ReplyAction] 无法保存历史:对话不存在") + return + + history = json.loads(conv.history) if conv.history else [] + history.append({"role": "assistant", "content": text}) + + await self.ctx.conv_manager.update_conversation( + self.ctx.unified_msg_origin, + self.ctx.conversation_id, + history=history, + ) + logger.debug(f"[ReplyAction] 已保存回复到历史,长度: {len(text)}") + except Exception as e: + logger.warning(f"[ReplyAction] 保存历史失败: {e}") + + async def _get_dialogue_history_formatted(self) -> str: + """获取格式化的对话历史""" + if not self.ctx.conv_manager or not self.ctx.conversation_id: + return "暂无对话历史" + + try: + conv = await self.ctx.conv_manager.get_conversation( + self.ctx.unified_msg_origin, self.ctx.conversation_id + ) + if not conv or not conv.history: + return "暂无对话历史" + + history = json.loads(conv.history) + if not history: + return "暂无对话历史" + + chat_config = self.ctx.chat_config or {} + message_length = chat_config.get("message_length", 10) + if not isinstance(message_length, int) or message_length < 1: + message_length = 10 + + return build_history_prompt(history, max_turns=message_length) + except Exception: + return "暂无对话历史" + + def _build_temp_prompts_formatted(self) -> str: + """获取格式化的临时提醒""" + if not self._executor: + return "" + temp_contents = self._executor.tick_temp_prompts(consume_rounds=False) + if not temp_contents: + return "" + return 
build_temp_prompts_section(temp_contents) + + def _build_running_states_formatted(self) -> str: + """获取格式化的动作实例状态""" + if not self._executor: + return "" + running_states = self._executor.get_running_states() + if not running_states: + return "" + return build_action_states_prompt(running_states) + + @staticmethod + def _clean_response(text: str) -> str: + """清理 LLM 返回的内容""" + if not text: + return "" + + # 移除常见前缀 + prefixes_to_remove = [ + "回复:", + "以下是我的回复:", + "我的回复:", + "答复:", + "回答:", + ] + for prefix in prefixes_to_remove: + if text.startswith(prefix): + text = text[len(prefix) :].strip() + + # 移除常见后缀 + suffixes_to_remove = [ + "以上", + "以上就是", + ] + for suffix in suffixes_to_remove: + if text.endswith(suffix): + text = text[: -len(suffix)].strip() + + return text.strip() diff --git a/astrbot/core/mind_sim/private/actions/Reply/__init__.py b/astrbot/core/mind_sim/private/actions/Reply/__init__.py new file mode 100644 index 0000000000..658807aa5a --- /dev/null +++ b/astrbot/core/mind_sim/private/actions/Reply/__init__.py @@ -0,0 +1 @@ +from .Reply import ReplyAction as ReplyAction diff --git a/astrbot/core/mind_sim/private/actions/Reply/reply_prompts.py b/astrbot/core/mind_sim/private/actions/Reply/reply_prompts.py new file mode 100644 index 0000000000..07c46918e7 --- /dev/null +++ b/astrbot/core/mind_sim/private/actions/Reply/reply_prompts.py @@ -0,0 +1,169 @@ +"""MindSim 回复动作提示词模块 + +根据 reply_type 类型选择不同的提示词模板,支持: +- normal: 正常回复 +- append: 追加回复(补充自己之前的发言) +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from astrbot.core.mind_sim.context import MindContext + + +# ========== 正常回复提示词 ========== + +NORMAL_REPLY_PROMPT = """ +你是{bot_name},正在和人聊天。 + +临时提醒 +{temp_prompts} + +当前运行的动作实例 +{running_actions} +当前状态 当前时间:{current_time} 聊天:{chat_group_name} + +以上为系统状态 +你现在是 {system_prompt} 这个人格,保持你的特质: {personality_traits} + +当前心情 +{mood} + +用这样的表达风格 +{expression_style} + +最近对话 +{dialogue_history} + 
+你是{bot_name},正在和人聊天。 +你现在应该就以下指导的话题进行回复: +{reply_guidance} +请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。 + +请注意不要输出多余内容(包括不必要的前后缀,冒号,括号,表情包,at或 @等 ),只输出发言内容就好。 + +现在请你读读之前的聊天记录,然后给出日常且口语化的回复 +尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,给出日常的回复,可以有个性 +免得啰嗦或者回复内容太乱。 + +现在,你说: +""" + + +# ========== 追加回复提示词 ========== + +APPEND_REPLY_PROMPT = """你是{bot_name},正在和人聊天。 + +临时提醒 +{temp_prompts} + +当前运行的动作实例 +{running_actions} +当前状态 当前时间:{current_time} 聊天:{chat_group_name} + +以上为系统状态 +你现在是 {system_prompt} 这个人格,保持你的特质: {personality_traits} + +当前心情 +{mood} + +用这样的表达风格 +{expression_style} + +最近对话 +{dialogue_history} + +你是{bot_name},正在和人聊天。 + +你是{bot_name},正在和人聊天。 +你现在想补充说明你刚刚自己的发言内容:{target},原因是{reason} +请你根据聊天内容,组织一条新回复。注意,{target} 是刚刚你自己的发言,你要在这基础上进一步发言,请按照你自己的角度来继续进行回复。注意保持上下文的连贯性。 +{reply_guidance} +请不要输出违法违规内容,不要输出色情,暴力,政治相关内容,如有敏感内容,请规避。 + +请注意不要输出多余内容(包括不必要的前后缀,冒号,括号,表情包,at或 @等 ),只输出发言内容就好。 + +尽量简短一些。{keywords_reaction_prompt}请注意把握聊天内容,不要回复的太有条理,可以有个性。 + +现在,你说: +""" + + +# ========== 提示词组装器 ========== + +REPLY_TYPE_PROMPTS = { + "normal": NORMAL_REPLY_PROMPT, + "append": APPEND_REPLY_PROMPT, +} + + +def build_reply_prompt( + reply_type: str, + reply_guidance: str, + ctx: MindContext, + dialogue_history: str, + *, + target: str = "", + reason: str = "", + temp_prompts: str = "", + running_actions: str = "", +) -> str: + """构建回复提示词 + + Args: + reply_type: 回复类型 (normal/append) + reply_guidance: 主思考给的指导 + ctx: MindContext 上下文 + dialogue_history: 对话历史(已由 prompts.build_history_prompt 格式化) + target: 追加回复时,要补充的原发言内容 + reason: 追加回复时,补充的原因 + temp_prompts: 临时提醒(已由 prompts.build_temp_prompts_section 格式化) + running_actions: 动作实例状态(已由 prompts.build_action_states_prompt 格式化) + + Returns: + 完整提示词 + """ + # 获取提示词模板 + template = REPLY_TYPE_PROMPTS.get(reply_type, NORMAL_REPLY_PROMPT) + + # 获取人格配置 + personality_config = ctx.personality_config or {} + traits = personality_config.get("traits", "善良、智能、有趣") + expression_style = personality_config.get("expression_style", "自然、友好") + + # 
获取系统提示词 + system_prompt = ctx.system_prompt or "你是一个助手" + + # 获取心情(从上下文内存中获取,当前思考周期的心情) + mood = ctx.memory.get("current_mood", "平静") + + # 获取机器人名称 + robot_config = ctx.robot_config or {} + bot_name = robot_config.get("nickname", "助手") + + # 当前时间 + from datetime import datetime + + current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + + # 聊天名称(从 unified_msg_origin 提取) + chat_group_name = ctx.unified_msg_origin or "群聊" + + return template.format( + bot_name=bot_name, + current_time=current_time, + chat_group_name=chat_group_name, + system_prompt=system_prompt, + personality_traits=traits, + mood=mood, + expression_style=expression_style, + running_actions=running_actions or "无", + dialogue_history=dialogue_history or "暂无", + reply_guidance=reply_guidance or "根据聊天内容自然回复", + keywords_reaction_prompt="", + temp_prompts=temp_prompts or "无", + target=target, + reason=reason, + ) diff --git a/astrbot/core/mind_sim/private/actions/RunTask.py b/astrbot/core/mind_sim/private/actions/RunTask.py new file mode 100644 index 0000000000..9e9a7a5f27 --- /dev/null +++ b/astrbot/core/mind_sim/private/actions/RunTask.py @@ -0,0 +1,432 @@ +"""执行任务动作 - 使用 Agent 执行复杂任务 + +工作流程: +1. 启动时创建 AgentRunner,执行指定任务 +2. 每执行一轮后,触发主思考,让主思考决定下一步 +3. 支持通过 SEND 追加新消息/指令 +4. 支持通过 STOP 停止任务 +5. 任务完成后自动触发重新思考 + +参数: +- task: 要执行的任务描述(必填) +- max_steps: 最大执行轮数,默认 10 +""" + +from collections.abc import AsyncGenerator +from typing import Any + +from astrbot.core import logger +from astrbot.core.astr_agent_run_util import AgentRunner, run_agent +from astrbot.core.astr_main_agent import ( + MainAgentBuildConfig, + build_main_agent, +) +from astrbot.core.mind_sim import Action, ActionOutput, ActionSendMsg + +TOOL_ASSISTANT_PROMPT = """你是一个工具助手。你的任务是根据用户的指令,使用各种工具来完成任务。 + +## 重要规则 +1. 仔细理解用户给你的任务要求 +2. 合理选择和使用可用的工具 +3. 每使用完一个工具后,根据返回结果决定下一步 +4. 如果任务完成或无法继续,及时汇报结果 +5. 
class RunTaskAction(Action):
    """Run-task action: execute a complex task through an Agent.

    Suitable for:
    - complex multi-step tasks
    - tasks that need tools
    - tasks needing multiple rounds of interaction

    **Each completed round automatically triggers the main thinking loop,
    which decides whether to continue.**
    """

    name = "run_task"
    description = """
执行任务动作 - 使用 Agent 执行动作
是解决不了的问题都可以调用这个动作试试看
**重要:每执行完一轮会自动触发主思考**
适用于:
- 执行复杂的多步骤任务
- 需要使用工具查询信息
- 任务需要多轮交互才能完成
- 操作电脑,执行程序,查看电脑上的东西调用控制台等,
- 一切需要操作的事情
- 建议多调用这个工具
工作流程:
1. 你指定任务目标和参数,启动动作
2. 动作执行过程中,每完成一步会触发重新思考
3. 你可以通过 SEND 追加新的指令或信息
4. 你可以通过 STOP 停止任务
参数:
{"task": "任务描述"(不能为空,传递给Agent的指令,要详细), "max_steps": 10}
示例
{"task": "帮我看看我电脑桌面有什么东西", "max_steps": 10}
{"task": "帮我打开飞书", "max_steps": 10}
{"task": "写某某代码保存到桌面新建文件夹", "max_steps": 20}

"""
    fixed_prompt = "执行任务中"
    priority = 10  # high priority — tasks are usually important

    usage_guide = """
    - 当需要执行复杂任务时使用
    - 当需要使用工具查询信息时使用
    - 任务会自动执行,每轮结束后会询问你
    - 你可以随时通过 SEND 追加指令或 STOP 停止
    """

    def __init__(self):
        super().__init__()
        # Per-instance task state.
        # FIX: the original declared these at class level, including the
        # mutable `_step_responses: list[dict] = []` — a class-level list is
        # shared by every instance, so concurrent RunTask instances would
        # interleave their step logs. Initialising per instance avoids that.
        self._agent_runner: AgentRunner | None = None
        self._current_step: int = 0
        self._max_steps: int = 10
        self._task_description: str = ""
        self._task_completed: bool = False
        self._reply_to_platform: bool = False  # direct platform replies, off by default
        # Per-step replies collected for the main thinking loop.
        self._step_responses: list[dict] = []
        self._pending_think_reason: str | None = None  # queued re-think reason
        self._final_result_responses: str = ""

    async def run(self, params: dict) -> AsyncGenerator[ActionOutput, None]:
        """Execute the task described by ``params``.

        Args:
            params: ``{"task": str, "max_steps": int, "reply_to_platform": bool}``.

        Yields:
            ActionOutput events (errors, re-think requests, completion).
        """
        self._task_description = params.get("task", "")
        self._max_steps = params.get("max_steps", 10)
        self._reply_to_platform = params.get(
            "reply_to_platform", False
        )  # switch comes from caller-supplied params
        self._step_responses = []  # reset for this run

        if not self._task_description:
            yield ActionOutput(
                action_name=self.instance_id or self.name,
                type="error",
                content="任务描述不能为空",
            )
            return

        self.update_state(
            progress=f"执行任务中: {self._task_description[:30]}...",
            prompt_contribution=f"正在执行任务: {self._task_description}",
            data={
                "task": self._task_description,
                "max_steps": self._max_steps,
            },
        )

        logger.info(f"[RunTask] 开始执行任务: {self._task_description}")

        try:
            # Build the AgentRunner with the same configuration internal.py uses.
            self._agent_runner = await self._build_agent_runner()

            if not self._agent_runner:
                yield ActionOutput(
                    action_name=self.instance_id or self.name,
                    type="error",
                    content="无法创建 Agent,请检查配置",
                )
                return

            # Drive the task loop (async generator — iterate it directly).
            async for output in self._run_task_loop():
                yield output

        except Exception as e:
            logger.error(f"[RunTask] 执行出错: {e}")
            yield ActionOutput(
                action_name=self.instance_id or self.name,
                type="error",
                content=f"执行出错: {str(e)}",
            )
self._step_responses = [] # 重置 + + if not self._task_description: + yield ActionOutput( + action_name=self.instance_id or self.name, + type="error", + content="任务描述不能为空", + ) + return + + self.update_state( + progress=f"执行任务中: {self._task_description[:30]}...", + prompt_contribution=f"正在执行任务: {self._task_description}", + data={ + "task": self._task_description, + "max_steps": self._max_steps, + }, + ) + + logger.info(f"[RunTask] 开始执行任务: {self._task_description}") + + try: + # 构建 AgentRunner(使用和 internal.py 完全相同的配置) + self._agent_runner = await self._build_agent_runner() + + if not self._agent_runner: + yield ActionOutput( + action_name=self.instance_id or self.name, + type="error", + content="无法创建 Agent,请检查配置", + ) + return + + # 执行任务循环(async generator 需要直接迭代) + async for output in self._run_task_loop(): + yield output + + except Exception as e: + logger.error(f"[RunTask] 执行出错: {e}") + yield ActionOutput( + action_name=self.instance_id or self.name, + type="error", + content=f"执行出错: {str(e)}", + ) + + async def _build_agent_runner(self) -> AgentRunner | None: + """构建 AgentRunner(使用和 internal.py 完全相同的配置)""" + # 从上下文中获取必要的信息 + ctx = self.ctx + if not ctx: + logger.error("[RunTask] 上下文为空") + return None + + # 获取 event 和 plugin_context + event = ctx.event + plugin_context = ctx.plugin_context + + if not event or not plugin_context: + logger.error("[RunTask] event 或 plugin_context 为空") + return None + + # 获取配置 + conf = plugin_context.get_config() + settings = conf.get("provider_settings", {}) + + # 构建主代理配置 + main_agent_cfg = MainAgentBuildConfig( + tool_call_timeout=settings.get("tool_call_timeout", 60), + streaming_response=False, # 禁用流式响应 + tool_schema_mode=settings.get("tool_schema_mode", "full"), + sanitize_context_by_modalities=settings.get( + "sanitize_context_by_modalities", False + ), + kb_agentic_mode=conf.get("kb_agentic_mode", False), + file_extract_enabled=settings.get("file_extract", {}).get("enable", False), + context_limit_reached_strategy=settings.get( + 
"context_limit_reached_strategy", "truncate_by_turns" + ), + llm_compress_instruction=settings.get("llm_compress_instruction", ""), + llm_compress_keep_recent=settings.get("llm_compress_keep_recent", 4), + max_context_length=settings.get("max_context_length", 128000), + dequeue_context_length=settings.get("dequeue_context_length", 20), + llm_safety_mode=settings.get("llm_safety_mode", True), + safety_mode_strategy=settings.get("safety_mode_strategy", "system_prompt"), + computer_use_runtime=settings.get("computer_use_runtime"), + sandbox_cfg=settings.get("sandbox", {}), + add_cron_tools=settings.get("proactive_capability", {}).get( + "add_cron_tools", True + ), + provider_settings=settings, + subagent_orchestrator=conf.get("subagent_orchestrator", {}), + timezone=conf.get("timezone"), + max_quoted_fallback_images=settings.get("max_quoted_fallback_images", 20), + ) + + # 构建 AgentRunner + # 不传 req,让 build_main_agent 自己构建,之后覆盖 system_prompt + build_result = await build_main_agent( + event=event, + plugin_context=plugin_context, + config=main_agent_cfg, + apply_reset=False, + ) + + if build_result: + # 强制覆盖 system_prompt,只使用工具助手提示词,不使用人格配置 + # build_result.provider_request.system_prompt = TOOL_ASSISTANT_PROMPT + build_result.provider_request.prompt += TOOL_ASSISTANT_PROMPT + + # 如果 apply_reset=False,需要手动调用 reset + if build_result.reset_coro: + await build_result.reset_coro + + # 覆盖 agent_runner 内部的 req + # build_result.agent_runner.req.system_prompt = TOOL_ASSISTANT_PROMPT + build_result.agent_runner.req.prompt += TOOL_ASSISTANT_PROMPT + + return build_result.agent_runner + + return None + + def _on_agent_step(self, step_idx: int, resp_type: str, resp_data: Any) -> None: + """run_agent 的回调,处理每步的消息""" + if resp_type == "tool_call": + # 工具调用 + msg_chain = resp_data.get("chain") + tool_name = "unknown" + if msg_chain: + for comp in msg_chain.chain: + if hasattr(comp, "data") and isinstance(comp.data, dict): + tool_name = comp.data.get("name", "unknown") + break + 
self._append_prompt_contribution(f"[使用工具: {tool_name}]") + # self._pending_think_reason = f"Agent使用了工具 {tool_name}" + + elif resp_type == "tool_call_result": + # 工具结果 + msg_chain = resp_data.get("chain") + result = msg_chain.get_plain_text() if msg_chain else "" + self._append_prompt_contribution(f"[工具返回结果]{result}") + self._pending_think_reason = f"工具返回了结果{result}" + + elif resp_type == "llm_result": + # LLM 回复 + msg_chain = resp_data.get("chain") + content = msg_chain.get_plain_text() if msg_chain else "" + if content: + self._append_prompt_contribution(f"[Agent回复: {content}]") + # 存储到列表 + self._step_responses.append( + { + "step": step_idx, + "type": "reply", + "content": content, + } + ) + self._pending_think_reason = f"Agent回复了: {content}...现在需要确认要不要根据Agent的回复向对方报告进度" + + elif resp_type == "done": + # 任务完成 + self._task_completed = True + final_resp = self._agent_runner.get_final_llm_resp() + if final_resp and final_resp.completion_text: + self._final_result_responses = final_resp.completion_text + self._append_prompt_contribution( + f"[任务完成,最终回复: {final_resp.completion_text}...请根据结果回复给对方]" + ) + else: + self._append_prompt_contribution("[任务已完成]") + self._pending_think_reason = "任务已完成" + + async def _run_task_loop(self) -> AsyncGenerator[ActionOutput, None]: + """执行任务循环 + + 直接调用 run_agent 一次,内部会循环执行 max_step 步: + - 通过回调 _on_agent_step 收集每步的消息 + - 消息追加到 prompt_contribution + - 完成后触发主思考 + """ + if not self._agent_runner: + return + + self._current_step = 0 + + logger.info(f"[RunTask] 开始执行任务,最大 {self._max_steps} 步") + + self.update_state( + progress=f"执行任务中: {self._task_description[:30]}...", + data={ + "task": self._task_description, + "max_steps": self._max_steps, + }, + ) + + try: + # 直接调用 run_agent 执行所有步 + async for _ in run_agent( + self._agent_runner, + max_step=self._max_steps, + show_tool_use=False, + show_tool_call_result=False, + stream_to_general=True, # 忽略流式内容 + step_callback=self._on_agent_step, + ): + # 检查是否有待触发的思考 + if self._pending_think_reason: + 
reason = self._pending_think_reason + self._pending_think_reason = None # 清空 + yield ActionOutput( + action_name=self.instance_id or self.name, + type="request_think", + content=reason, + prompt=reason, # 传给 Brain 的原因 + ) + + # 检查是否完成 + if self._agent_runner.done(): + logger.info("[RunTask] 任务完成") + else: + logger.info("[RunTask] 任务未完成") + + except Exception as e: + logger.error(f"[RunTask] 执行出错: {e}") + self._append_prompt_contribution(f"[执行出错: {str(e)}]") + yield ActionOutput( + action_name=self.instance_id or self.name, + type="error", + content=f"执行出错: {str(e)}", + ) + return + + # 任务完成,触发主思考 + yield ActionOutput( + action_name=self.instance_id or self.name, + type="completed", + content=f"任务执行完成: {self._task_description}", + metadata={ + "max_steps": self._max_steps, + "completed": self._task_completed, + }, + ) + + def _append_prompt_contribution(self, suffix: str) -> None: + """追加 prompt_contribution(而不是覆盖)""" + current = self._state.prompt_contribution or "" + if current: + self.update_state(prompt_contribution=f"{current} {suffix}") + else: + self.update_state( + prompt_contribution=f"可以在合适的时候向聊天对象汇报进度:执行任务中: {self._task_description} {suffix}" + ) + + async def on_complete(self, params: dict) -> None: + """完成后添加临时提示词(仅正常完成时调用)""" + # 构建任务执行摘要 + summary_parts = [f"任务: {self._task_description}"] + + # 2. 每次 Agent 回复 + if self._step_responses: + summary_parts.append( + f"\nAgent 执行过程(共 {len(self._step_responses)} 轮):" + ) + for i, response in enumerate(self._step_responses, 1): + # 截取前200字符避免过长 + content = ( + str(response)[:200] + "..." + if len(str(response)) > 200 + else str(response) + ) + summary_parts.append(f" 第{i}轮: {content}") + + # 3. 最终结果 + final_result = self._final_result_responses + + if final_result: + result_preview = final_result + summary_parts.append(f"\n最终结果: {result_preview}") + + # 4. 
完成状态 + if self._task_completed: + summary_parts.append("\n状态: 任务已完成") + else: + summary_parts.append( + f"\n状态: 已执行 {len(self._step_responses)} 轮,未完全完成" + ) + + # 将摘要添加为临时提示词(保留5轮思考) + summary = "\n".join(summary_parts) + self.add_temp_prompt(f"run_task 执行结果:\n{summary}", rounds=5) + + def on_message(self, msg: ActionSendMsg) -> None: + """处理接收到的消息""" + logger.info(f"[RunTask] 收到消息: {msg.message[:50]}...") + + async def on_stop(self) -> None: + """停止时清理资源""" + if self._agent_runner: + try: + self._agent_runner.request_stop() + except Exception as e: + logger.error(f"[RunTask] 停止时出错: {e}") diff --git a/astrbot/core/mind_sim/private/actions/Wait.py b/astrbot/core/mind_sim/private/actions/Wait.py new file mode 100644 index 0000000000..cb7e720e51 --- /dev/null +++ b/astrbot/core/mind_sim/private/actions/Wait.py @@ -0,0 +1,123 @@ +import asyncio +from collections.abc import AsyncGenerator + +from astrbot.core.mind_sim import Action, ActionOutput, ActionStopMsg + + +class WaitAction(Action): + """等待动作 - 暂停思考,等待指定时间 + + 等待结束后会自动触发下一轮思考。 + 可被用户消息打断。 + """ + + name = "wait" + description = """等待动作 - 暂停思考,等待指定时间 + +**重要:等待结束后会自动触发下一轮思考** + +适用于以下情况: +- 你已经表达清楚一轮,想给对方留出空间 +- 你感觉对方的话还没说完,或者自己刚刚发了好几条连续消息 +- 你想要等待一定时间来让对方把话说完,或者等待对方反应 +- 你想保持安静,专注"听"而不是马上回复 + +请你根据上下文来判断要等待多久: +- 如果你们交流间隔时间很短,聊的很频繁,不宜等待太久(10-30秒) +- 如果你们交流间隔时间很长,聊的很少,可以等待较长时间(60-120秒) + +参数: {"duration": 60} +""" + fixed_prompt = "正在等待中" + priority = 0 + + usage_guide = """ + - 当你不知道该做什么时使用 + - 当需要等待用户回复时使用 + - 当需要给对方留出思考空间时使用 + - 等待结束后会自动再次进入思考 + """ + + def __init__(self): + super().__init__() + self._stop_event = None + + async def on_complete(self, params: dict) -> None: + """完成后添加临时提示词(仅正常完成时调用)""" + # 从 state 中获取实际等待时间 + wait_time = self._state.data.get("actual_wait_time", 0) + if wait_time: + self.add_temp_prompt( + f"已等待: {int(wait_time)}秒 ,如果有这句话,则有重复的等待,不用重新调用等待任务", + rounds=5, + min_duration=30.0, + ) + + async def on_stop(self) -> None: + """立即中断等待""" + if self._stop_event: + self._stop_event.set() + 
+ async def run(self, params: dict) -> AsyncGenerator[ActionOutput, None]: + self.update_state( + progress="等待中", + prompt_contribution="如果有这句话,这不用重复等待,可以noop跳过本轮", + ) + self._stop_event = asyncio.Event() + + wait_time = float(params.get("duration", 60)) # 转换为 float + start_time = asyncio.get_event_loop().time() + update_interval = 10.0 # 每10秒更新一次进度 + check_interval = 2.0 # 每2秒检查一次消息 + + try: + while True: + # 检查 stop event + if self._stop_event and self._stop_event.is_set(): + self.update_state(progress="等待被停止") + return + + elapsed = asyncio.get_event_loop().time() - start_time + remaining = wait_time - elapsed + + if remaining <= 0.0: + # 等待时间到 + break + + # 每次只检查1秒,避免长时间阻塞 + msg = await self.check_message(timeout=check_interval) + + if msg: + if isinstance(msg, ActionStopMsg): + self.update_state(progress="等待被停止") + # 被停止时不触发重新思考(由外部控制) + return + # SEND 消息可以调整等待时间或其他操作 + continue + + # 每隔 update_interval 更新一次进度 + elapsed = asyncio.get_event_loop().time() - start_time + remaining = wait_time - elapsed + if remaining > 0.0 and int(elapsed) % int(update_interval) == 0: + self.update_state( + progress=f"等待中(剩余 {int(remaining)} 秒)", + ) + + except asyncio.CancelledError: + self.update_state(progress="等待被取消") + return + finally: + self._stop_event = None + + # 记录实际等待时间 + actual_wait = asyncio.get_event_loop().time() - start_time + self.update_state(data={"actual_wait_time": actual_wait}) + self.update_state(progress="等待完成,将重新思考") + + # 正常完成,yield 一个标记 + yield ActionOutput( + action_name=self.instance_id or self.name, + type="completed", + content="", + ) + # 正常完成,会自动发送 completed 事件触发重新思考 diff --git a/astrbot/core/mind_sim/private/actions/__init__.py b/astrbot/core/mind_sim/private/actions/__init__.py new file mode 100644 index 0000000000..d7c96a5030 --- /dev/null +++ b/astrbot/core/mind_sim/private/actions/__init__.py @@ -0,0 +1,37 @@ +"""MindSim 动作模块 + +包含私聊和群聊场景下的动作实现。 +""" + +from astrbot.core.mind_sim.action import Action + +# 动作类导入 +from .EndConversation 
# Actions available in private chats.
PRIVATE_ACTIONS = [
    ReplyAction,
    WaitAction,
    NoOpAction,
    EndConversationAction,
    RunTaskAction,
]


def get_available_actions() -> list[type[Action]]:
    """Return the list of action classes available for registration."""
    return PRIVATE_ACTIONS


__all__ = [
    "ReplyAction",
    "WaitAction",
    "NoOpAction",
    "EndConversationAction",
    "RunTaskAction",
    "get_available_actions",
]


# ===== file: astrbot/core/mind_sim/private/brain.py =====
"""MindSim private-chat main thinking module — event-driven architecture.

Responsibilities:
1. Collect all action states and prompt contributions.
2. Estimate scene complexity with a fast model and pick a thinking level.
3. Call the corresponding model for a decision.
4. Execute decisions (start / send-to / stop actions).
5. Forward action outputs to the message platform.

Architecture notes:
- Event driven: no main loop; thinking is triggered via think_once().
- Multiple entry points: user messages, action completion, wait expiry.
- Completed actions automatically trigger the next thinking round.
"""

import asyncio
import json
import random
import re
import time
from collections.abc import AsyncGenerator
from typing import Any

from astrbot.core import logger
from astrbot.core.mind_sim.action import ActionExecutor
from astrbot.core.mind_sim.AgentMindSubStage import AgentMindSubStage
from astrbot.core.mind_sim.context import MindContext
from astrbot.core.mind_sim.messages import (
    ActionOutput,
    ActionSendMsg,
    ActionStateUpdate,
    MindEvent,
    MindEventType,
)
from astrbot.core.platform.astr_message_event import AstrMessageEvent

from .actions import get_available_actions
from .prompts import (
    DECISION_FORMAT_PROMPT,
    STUCK_PROMPT,
    UPGRADE_THINKING_PROMPT,
    build_action_states_prompt,
    build_history_prompt,
    build_main_thinking_prompt,
    build_prompt_sections,
    build_temp_prompts_section,
)
def parse_decision(llm_output: str) -> list[dict]:
    """Parse LLM output into a list of decision dicts.

    Uniform command grammar, one command per line:
    - ``START <action> [{json params}]``
    - ``SEND <action-or-instance-id> <message>``
    - ``STOP <action-or-instance-id>``

    Instance IDs look like ``<action>#<n>`` (e.g. ``reply#1``, ``wait#2``).

    Returns:
        List of dicts with an "action" key plus command-specific fields.
    """
    patterns = {
        "START": re.compile(r"^START\s+([\w]+)\s*(\{.*\})?\s*$", re.IGNORECASE),
        "SEND": re.compile(r"^SEND\s+([\w#]+)\s+(.+)$", re.IGNORECASE),
        "STOP": re.compile(r"^STOP\s+([\w#]+)\s*$", re.IGNORECASE),
    }

    decisions: list[dict] = []
    for raw in llm_output.strip().split("\n"):
        line = raw.strip()
        if not line:
            continue

        # Cheap prefix filter before running the regexes.
        upper = line.upper()
        if not any(upper.startswith(cmd) for cmd in patterns):
            continue

        for kind, pattern in patterns.items():
            m = pattern.match(line)
            if m is None:
                continue

            entry: dict = {"action": kind}
            if kind == "START":
                entry["target"] = m.group(1)
                params: dict = {}
                if m.group(2):
                    try:
                        params = json.loads(m.group(2))
                    except json.JSONDecodeError:
                        pass  # malformed JSON → start with empty params
                entry["params"] = params
            elif kind == "SEND":
                entry["target"] = m.group(1)
                entry["message"] = m.group(2).strip().strip("\"'")
            else:  # STOP
                entry["target"] = m.group(1)

            decisions.append(entry)
            break  # first matching command wins for this line

    return decisions


# Abort thinking after this many consecutive LLM failures.
MAX_LLM_ERROR_COUNT = 3


class PrivateBrain:
    """Private-chat main thinking module (event driven).

    Manages action instances through an ActionExecutor, allowing several
    concurrent instances of the same action. There is no main loop:
    thinking is triggered externally and action completion chains the
    next round automatically.
    """

    def __init__(
        self,
        ctx: MindContext,
        persona: dict | None = None,
    ):
        self.ctx = ctx
        self.persona = persona or {}
        self.llm: AgentMindSubStage | None = None

        # Action executor + registered action classes.
        self.executor = ActionExecutor(
            ctx=ctx, send_callback=self._on_action_output, llm=None
        )
        for action_cls in get_available_actions():
            self.executor.register(action_cls)

        # Outbound event queue consumed by get_event_stream().
        self._event_queue: asyncio.Queue[MindEvent] = asyncio.Queue()

        # Thinking state.
        self._thinking = False
        self._think_requested = False
        self._think_task: asyncio.Task | None = None

        # Throttle: at most one think per cooldown second; prompts accumulate.
        self._last_think_time: float = 0
        self._think_cooldown: float = 1.0
        self._pending_think_timer: asyncio.Task | None = None

        # Whether a pending event should interrupt a running wait action.
        self._interrupt_wait_pending: bool = False

        # Interrupt used to break out of blocking waits.
        self._interrupt_event: asyncio.Event = asyncio.Event()

        # Event-stream state.
        self._stream_active = False

        # Consecutive LLM failures.
        self._llm_error_count = 0

        # Consecutive-wait tracking (stuck detection).
        self._consecutive_wait_count: int = 0
        self._consecutive_wait_threshold: int = 3
        self._first_wait_time: float = 0
        self._stuck_min_duration: float = 60.0

        # Prompts queued for the next thinking round.
        self._think_prompt_queue: asyncio.Queue[str] = asyncio.Queue()

        # Pick an initial mood from the advanced persona config.
        self._init_mood()

        logger.debug(
            f"[PrivateBrain] 初始化完成,动作类: {self.executor.get_action_class_names()}"
        )
self._interrupt_wait_pending: bool = False + + # 中断事件(用于阻塞时被用户消息或动作消息打断) + self._interrupt_event: asyncio.Event = asyncio.Event() + + # 事件流状态 + self._stream_active = False + + # LLM 错误计数 + self._llm_error_count = 0 + + # 连续等待计数(用于检测是否卡住) + self._consecutive_wait_count: int = 0 + self._consecutive_wait_threshold: int = 3 # 连续等待3次后认为可能卡住 + self._first_wait_time: float = 0 # 第一次等待的时间戳 + self._stuck_min_duration: float = 60.0 # 卡住判断的最小时间(秒) + + # 思考传入提示词 + self._think_prompt_queue: asyncio.Queue[str] = asyncio.Queue() + + # 初始化心情(从高级人格配置中根据权重随机选择) + self._init_mood() + + logger.debug( + f"[PrivateBrain] 初始化完成,动作类: {self.executor.get_action_class_names()}" + ) + + def _init_mood(self): + """根据心情标签权重随机选择心情""" + # 从 persona 中获取心情标签配置 + personality_config = self.persona.get("personality_config", {}) + mood_tags = personality_config.get("mood_tags", []) + + if not mood_tags: + # 默认心情 + self.ctx.memory["current_mood"] = "平静" + return + + # 根据权重随机选择 + total_weight = sum(tag.get("weight", 0) for tag in mood_tags) + if total_weight <= 0: + self.ctx.memory["current_mood"] = "平静" + return + + rand_val = random.random() * total_weight + cumulative = 0 + selected_mood = "平静" + + for tag in mood_tags: + cumulative += tag.get("weight", 0) + if rand_val <= cumulative: + selected_mood = tag.get("name", "平静") + break + + self.ctx.memory["current_mood"] = selected_mood + logger.debug(f"[PrivateBrain] 初始心情: {selected_mood}") + + async def _on_action_output(self, output): + """动作产出回调,将产出转为事件放入队列""" + if output is None: + logger.warning("Received None output") + return + + reason = output.prompt if hasattr(output, "prompt") else "" + + if isinstance(output, ActionOutput): + # 根据输出类型转换为对应的 MindEvent + if output.type == "reply": + await self._event_queue.put( + MindEvent.reply(output.content, output.metadata) + ) + # 检查是否标记了不触发重新思考(如 EndConversation 的回复) + if not (output.metadata and output.metadata.get("no_think")): + self._interrupt_event.set() + # reply 发出后打断 wait,让主思考决定下一步 + 
self._interrupt_wait_pending = True + await self._schedule_think( + f"动作 {output.action_name} 发出了回复{reason}" + ) + elif output.type == "typing": + await self._event_queue.put(MindEvent.typing()) + elif output.type == "error": + await self._event_queue.put( + MindEvent.error(output.content, output.metadata) + ) + elif output.type == "completed": + # 动作完成,触发重新思考 + logger.debug( + f"[PrivateBrain] 动作 {output.action_name} 完成,触发重新思考{reason}" + ) + await self._schedule_think( + f"这次是动作{output.action_name} 完成的自动触发思考{reason}" + ) + elif output.type == "completed_no_think": + # 动作完成但不触发重新思考 + logger.debug( + f"[PrivateBrain] 动作 {output.action_name} 完成(不触发重新思考)" + ) + elif output.type == "end": + # 动作请求结束对话 + logger.info( + f"[PrivateBrain] 动作 {output.action_name} 请求结束对话: {output.content}" + ) + # 先停止所有其他正在运行的动作 + await self.executor.stop_all("结束对话,停止所有动作") + await self._event_queue.put(MindEvent.end(output.content)) + elif output.type == "request_think": + # 动作显式请求重新思考,打断 wait + logger.debug( + f"[PrivateBrain] 动作 {output.action_name} 请求重新思考: {reason}" + ) + self._interrupt_wait_pending = True + if reason: + await self._schedule_think( + f"这次是动作{output.action_name}由于原因是{reason}请求重新思考触发思考" + ) + else: + await self._schedule_think( + f"这次是动作{output.action_name}请求重新思考触发思考" + ) + elif output.type == "noop": + logger.info( + f"[PrivateBrain] 动作 {output.action_name} 什么都没做{reason}" + ) + elif isinstance(output, ActionStateUpdate): + pass + + def init_llm( + self, + event: AstrMessageEvent, + plugin_context: Any, + persona: dict, + ): + """初始化 LLM 实例""" + try: + self.llm = AgentMindSubStage.create_for_brain( + event=event, + plugin_context=plugin_context, + persona_config=persona, + ) + # 注入 Brain 的事件队列,让 call() 能发送 PIPELINE_YIELD + self.llm._mind_event_queue = self._event_queue + # 同步给 executor,让动作实例能拿到 llm + self.executor._llm = self.llm + except Exception as e: + logger.error(f"[PrivateBrain] 创建 AgentMindSubStage 失败: {e}") + self.llm = None + + async def handle_message( + self, 
+ message: str, + sender_id: str, + sender_name: str, + ): + """处理用户消息 - 主要入口之一""" + logger.debug(f"[PrivateBrain] 收到用户消息: {message[:50]}...") + + # 触发中断(打断阻塞等待) + self._interrupt_event.set() + + # 标记本次思考由用户消息触发(需要打断 wait) + self._interrupt_wait_pending = True + + # 触发思考 + await self._schedule_think( + f"以下是这一轮思考的新的用户消息: {message} ,你可以决定要不要回复这条消息 这是一条新消息新的!" + ) + + async def _schedule_think(self, prompt: str | None = None): + """调度一次思考(节流机制:1秒内只触发一次,累积提示词)""" + # 1. 将提示词加入队列(无论是否立即思考) + if prompt: + await self._think_prompt_queue.put(prompt) + logger.debug(f"[PrivateBrain] 收到思考提示词,已加入队列: {prompt[:50]}...") + + # 2. 如果正在思考中,标记需要再次思考 + if self._thinking: + self._think_requested = True + logger.debug("[PrivateBrain] 思考中,标记待思考") + return + + # 3. 检查冷却时间 + current_time = time.time() + time_since_last_think = current_time - self._last_think_time + + if time_since_last_think < self._think_cooldown: + # 在冷却期内,延迟思考 + remaining_cooldown = self._think_cooldown - time_since_last_think + + # 如果已经有延迟定时器,不需要重复创建 + if self._pending_think_timer and not self._pending_think_timer.done(): + logger.debug( + f"[PrivateBrain] 冷却中,提示词已累积,等待 {remaining_cooldown:.2f}秒后统一思考" + ) + return + + # 创建延迟思考定时器 + logger.debug( + f"[PrivateBrain] 冷却中,延迟 {remaining_cooldown:.2f}秒后思考" + ) + self._pending_think_timer = asyncio.create_task( + self._delayed_think(remaining_cooldown) + ) + return + + # 4. 
冷却完成,立即启动思考 + self._last_think_time = current_time + self._think_task = asyncio.create_task(self._do_think()) + + async def _delayed_think(self, delay: float): + """延迟思考(等待冷却时间后触发)""" + try: + await asyncio.sleep(delay) + + # 冷却完成,启动思考 + if not self._thinking: + self._last_think_time = time.time() + self._think_task = asyncio.create_task(self._do_think()) + logger.debug("[PrivateBrain] 冷却完成,启动延迟思考") + except asyncio.CancelledError: + logger.debug("[PrivateBrain] 延迟思考被取消") + except Exception as e: + logger.error(f"[PrivateBrain] 延迟思考异常: {e}") + + async def _do_think(self): + """执行思考(可能多轮)""" + self._thinking = True + try: + while True: + self._think_requested = False + + # 进入思考时,检查是否需要打断 wait + if self._interrupt_wait_pending: + await self.executor.stop_by_name("wait", "有新事件到达,打断等待") + self._interrupt_wait_pending = False + + # 清理已完成的动作实例 + await self.executor.cleanup_completed() + + # 构建提示词 + prompt = await self._build_prompt() + ORANGE = "\033[38;5;214m" + RESET = "\033[0m" + logger.debug(f"{ORANGE}[PrivateBrain] 思考提示词: {prompt}{RESET}") + + try: + if self.llm: + llm_response = await self._think(prompt) + decisions = parse_decision(llm_response or "") + logger.debug( + f"[PrivateBrain] LLM 决策: {[d.get('action') for d in decisions]}" + ) + + # 调用成功,重置错误计数 + self._llm_error_count = 0 + else: + decisions = [] + except Exception as e: + logger.error(f"[PrivateBrain] LLM 调用失败: {e}") + self._llm_error_count += 1 + + if self._llm_error_count >= MAX_LLM_ERROR_COUNT: + error_msg = ( + f"模型配置错误,已连续失败 {self._llm_error_count} 次。" + f"\n请检查高级人格的 LLM 模型配置是否正确。" + ) + logger.error(f"[PrivateBrain] {error_msg}") + await self._event_queue.put(MindEvent.error(error_msg)) + break + + await asyncio.sleep(1) + continue + + # 执行决策 + if decisions: + for decision in decisions: + await self._execute_decision(decision) + await asyncio.sleep(0.1) + + # 检查是否需要再次思考 + if not self._think_requested: + break + + except asyncio.CancelledError: + logger.info("[PrivateBrain] 思考被取消") + except 
Exception as e: + logger.error(f"[PrivateBrain] 思考异常: {e}") + finally: + self._thinking = False + # 检查是否应该发送 END 事件 + self._maybe_emit_end() + + async def _only_wait(self): + # 检测是否只有 wait 动作在运行(连续等待) + running_states = self.executor.get_running_states() + is_only_wait = ( + len(running_states) == 1 and running_states[0]["action_name"] == "wait" + ) + + if is_only_wait: + # 第一次等待,记录时间 + if self._consecutive_wait_count == 0: + self._first_wait_time = time.time() + + self._consecutive_wait_count += 1 + logger.debug( + f"[PrivateBrain] 检测到连续等待,当前次数: {self._consecutive_wait_count}" + ) + else: + # 有其他动作运行,重置计数和时间 + self._consecutive_wait_count = 0 + self._first_wait_time = 0 + + # 超过阈值时,检查时间条件 + stuck_hint = "" + if self._consecutive_wait_count >= self._consecutive_wait_threshold: + # 计算从第一次等待到现在的时间 + elapsed_time = ( + time.time() - self._first_wait_time if self._first_wait_time > 0 else 0 + ) + + # 只有当连续等待次数达标且时间超过阈值时才提示 + if elapsed_time >= self._stuck_min_duration: + stuck_hint = ( + STUCK_PROMPT + + f"检测到连续等待,连续等待 {self._consecutive_wait_count} 次,持续 {int(elapsed_time)} 秒,没特别的不要超过5分钟" + ) + logger.debug( + f"[PrivateBrain] 连续等待 {self._consecutive_wait_count} 次," + f"持续 {int(elapsed_time)} 秒,添加结束提示" + ) + + # 将结束提示加入队列 + if stuck_hint: + await self._think_prompt_queue.put(stuck_hint) + + def _maybe_emit_end(self): + """检查是否应该发送 END 事件(无动作运行且无待思考)""" + if ( + not self._thinking + and not self.executor.has_running() + and not self._think_requested + ): + logger.debug("[PrivateBrain] 思考完成,发送 END 事件") + asyncio.create_task(self._event_queue.put(MindEvent.end("思考完成"))) + + async def get_event_stream(self) -> AsyncGenerator[MindEvent, None]: + """获取输出事件流 + + 外部(如 internal_mind.py)通过这个方法获取 MindSim 的输出事件。 + 事件流在收到 END 事件后结束。 + """ + self._stream_active = True + try: + while True: + try: + event = await asyncio.wait_for(self._event_queue.get(), timeout=5) + + if event.type == MindEventType.END: + logger.debug( + "[PrivateBrain] 收到 END 事件,关闭事件流" + ) # 
todo这里还要检查是否由运行中的动作,思考,确保结束时候这个类是干净的 + yield event + break + + yield event + + except asyncio.TimeoutError: + # 超时检查:如果无动作运行且无思考,发送 END + if not self._thinking and not self.executor.has_running(): + logger.debug("[PrivateBrain] 超时且空闲,发送 END 事件") + yield MindEvent.end(reason="思考超时") + break + finally: + self._stream_active = False + + async def _think(self, prompt: str) -> str: + """统一的思考入口:先快速模型评估,按需升级""" + # 快速模型调用(包含升级思考模块) + # 使用 call_simple 直接获取文本响应 + fast_response = await self.llm.call_simple( + prompt=prompt, + role="fast", + ) + logger.debug(f"[PrivateBrain] 快速思考结果: {fast_response}") + + need_role = self._parse_need_deeper(fast_response) + + if need_role == "fast": + return fast_response + + logger.info(f"[PrivateBrain] 升级到 {need_role} 思考") + + # 升级思考时不传入升级模块(避免循环升级) + upgraded_prompt = await self._build_prompt(include_upgrade=False) + response = await self.llm.call_simple( + prompt=upgraded_prompt, + role=need_role, + ) + logger.debug(f"[PrivateBrain] {need_role} 思考结果: {response}") + return response + + @staticmethod + def _parse_need_deeper(fast_response: str) -> str: + """从快速模型的输出中解析是否需要升级思考""" + if not fast_response: + return "fast" + + match = re.search(r"NEED_DEEPER:\s*(MEDIUM|DEEP)", fast_response, re.IGNORECASE) + if not match: + return "fast" + + level_str = match.group(1).upper() + if level_str == "DEEP": + return "deep" + elif level_str == "MEDIUM": + return "medium" + return "fast" + + async def _wait_for_interrupt(self, timeout: float) -> str: + """阻塞主思考,等待中断 + + 被以下事件打断: + - 用户消息到达(handle_message 设置 _interrupt_event) + - 动作产出到达(_on_action_output 设置 _interrupt_event) + - 超时 + """ + self._interrupt_event.clear() + try: + await asyncio.wait_for(self._interrupt_event.wait(), timeout=timeout) + return "interrupted" + except asyncio.TimeoutError: + return "timeout" + + async def _build_prompt(self, include_upgrade: bool = True) -> str: + """构建思考提示词 + + Args: + include_upgrade: 是否包含升级思考模块(快速模型需要,升级后的模型不需要) + """ + # 系统提示词 + system_prompt = 
build_main_thinking_prompt( + persona=self.persona, + ctx=self.ctx, + action_infos=self.executor.get_action_infos(), + ) + + # 当前运行的动作实例状态 + running_states = self.executor.get_running_states() + states_prompt = ( + build_action_states_prompt(running_states) if running_states else "" + ) + + # 临时提示词 + if include_upgrade: + temp_contents = self.executor.tick_temp_prompts(consume_rounds=False) + else: + temp_contents = self.executor.tick_temp_prompts(consume_rounds=True) + + temp_prompt = build_temp_prompts_section(temp_contents) if temp_contents else "" + + # 最近对话历史(从数据库读取) + history = [] + if self.ctx.conv_manager and self.ctx.conversation_id: + conversation = await self.ctx.conv_manager.get_conversation( + self.ctx.unified_msg_origin, self.ctx.conversation_id + ) + if conversation and conversation.history: + history = json.loads(conversation.history) + + # 从聊天配置中获取消息条数,默认为 10 + chat_config = self.ctx.chat_config or {} + message_length = chat_config.get("message_length", 10) + if not isinstance(message_length, int) or message_length < 1: + message_length = 10 + + history_prompt = build_history_prompt(history, max_turns=message_length) + + # 决策格式(可选升级思考模块) + decision_section = DECISION_FORMAT_PROMPT + if include_upgrade: + decision_section += UPGRADE_THINKING_PROMPT + + # 传入思考的提示词 + queue_prompts = [] + while not self._think_prompt_queue.empty(): + try: + prompt = self._think_prompt_queue.get_nowait() + queue_prompts.append(prompt) + self._think_prompt_queue.task_done() + except asyncio.QueueEmpty: + break + queue_section = "" + if queue_prompts: + queue_section = "【额外思考提示】\n" + for i, prompt in enumerate(queue_prompts, 1): + queue_section += f"{i}. 
{prompt}\n" + + # 使用灵活组装器 + return build_prompt_sections( + system_prompt, + states_prompt, + temp_prompt, + history_prompt, + decision_section, + queue_section, + ) + + async def _execute_decision(self, decision: dict): + """执行决策""" + action_type = decision.get("action") + + try: + if action_type == "START": + await self._exec_start(decision) + elif action_type == "SEND": + await self._exec_send(decision) + elif action_type == "STOP": + await self._exec_stop(decision) + except Exception as e: + logger.error(f"[PrivateBrain] 执行决策失败: {e}") + + async def _exec_start(self, decision: dict): + """执行 START 决策""" + action_name = decision.get("target") + params = decision.get("params", {}) + + if action_name not in self.executor.get_action_class_names(): + logger.warning(f"[PrivateBrain] 未知动作: {action_name}") + return + + logger.info(f"[PrivateBrain] 启动动作: {action_name}") + + # 通过 executor 启动 + instance_id, pre_result = await self.executor.start(action_name, params) + await self._only_wait() + # 处理预执行结果 + if pre_result and pre_result.block: + logger.info( + f"[PrivateBrain] 动作 {instance_id} 请求阻塞主思考: " + f"{pre_result.block_reason} (超时 {pre_result.block_timeout}s)" + ) + result = await self._wait_for_interrupt(pre_result.block_timeout) + logger.info(f"[PrivateBrain] 阻塞结束: {result}") + + async def _exec_send(self, decision: dict): + """执行 SEND 决策""" + target = decision.get("target", "") + message = decision.get("message", "") + + instance_id = self.executor.resolve_instance_id(target) + if not instance_id: + logger.warning(f"[PrivateBrain] 无法解析目标: {target}") + return + + logger.debug(f"[PrivateBrain] 向实例 {instance_id} 发送消息") + await self.executor.send_to( + instance_id, + ActionSendMsg( + action_name=instance_id, + message=message, + ), + ) + + async def _exec_stop(self, decision: dict): + """执行 STOP 决策""" + target = decision.get("target", "") + + instance_id = self.executor.resolve_instance_id(target) + if not instance_id: + if target in 
self.executor.get_action_class_names(): + await self.executor.stop_by_name(target, "主思考决策停止") + else: + logger.warning(f"[PrivateBrain] 无法解析目标: {target}") + return + + logger.info(f"[PrivateBrain] 停止实例: {instance_id}") + await self.executor.stop_instance(instance_id, "主思考决策停止") diff --git a/astrbot/core/mind_sim/private/prompts.py b/astrbot/core/mind_sim/private/prompts.py new file mode 100644 index 0000000000..4644a071c6 --- /dev/null +++ b/astrbot/core/mind_sim/private/prompts.py @@ -0,0 +1,384 @@ +"""MindSim 提示词模块 - 动作决策和思考相关提示词 + +包含: +- 决策格式提示词(支持 instance_id) +- 升级思考提示词(独立模块,快速模型用) +- 连续等待提示词(卡住时建议) +- 可用动作描述提示词(从 Action 元信息动态生成) +- 主思考系统提示词(从 Personality 配置读取) +- 动作实例状态提示词(基于 instance_id) +- 临时提示词渲染 +- 对话历史提示词(带日期时间) +- 灵活提示词组装器(build_prompt_sections) +""" + +from __future__ import annotations + +from datetime import datetime +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from astrbot.core.db.po import Personality + from astrbot.core.mind_sim.context import MindContext + + +# ========== 决策格式提示词 ========== + +DECISION_FORMAT_PROMPT = """ +## 决策输出格式 + +你需要在回复中做出决策来控制你的行为。每行一个决策,格式如下: +JSON参数由动作描述决定 +``` +START <动作名> # 启动新动作实例 +SEND <动作名或实例ID> <消息内容> # 向运行中的动作发消息 +STOP <动作名或实例ID> # 停止动作 +``` +### 实例 ID +每个启动的动作会被分配一个唯一的实例 ID,格式为 `<动作名>#<序号>`。 +例如启动两次 reply,会分别得到 `reply#1` 和 `reply#2`。 +SEND 和 STOP 可以用实例 ID 精确控制某个特定实例,也可以用动作名控制最新的实例。 +### 示例 +START wait {"duration": 30} +### 注意事项 +1. 同一动作可以多次启动,互不阻塞(如同时发多条回复) +2. 用 SEND 可以影响正在运行的动作,引导其行为 +3. 动作的状态会在下一轮思考时显示给你 +4. 动作执行完成后会自动结束,不需要手动 STOP +5. 
当不知道做什么时,可以使用 START wait {"duration": 60} 等待 +""" + + +# ========== 升级思考提示词(独立模块,升级后的模型不需要此部分) ========== + +UPGRADE_THINKING_PROMPT = """ +### 升级思考 +如果你觉得当前场景太复杂(正在执行的动作过多、动作之间有冲突或依赖、需要更深入的推理), +你可以在输出中加一行请求升级思考,系统会用更强的模型重新思考: +``` +NEED_DEEPER: MEDIUM # 请求中度思考(中等复杂度) +NEED_DEEPER: DEEP # 请求深度思考(高复杂度) +``` +注意:如果你输出了 NEED_DEEPER,当前这次的决策会被丢弃,由更强模型重新决策。 +所以只在你确实觉得自己处理不好的时候才请求升级。 +""" + + +# ========== 连续等待提示词(卡住建议) ========== + +STUCK_PROMPT = """ +### 长时间等待提示 +你已经连续等待多次,且对话没有进展。 +**如果确实没有更多可以说的,可以考虑使用 end_conversation 动作结束对话。** +""" + + +# ========== 主思考系统提示词模板 ========== + +MAIN_THINKING_SYSTEM_PROMPT = """你是{bot_name},一个具有自主思考能力的真人。 + +## 系统提示词 +{system_prompt} +## 你的特点 +{personality_traits} +## 当前心情 +{mood} +## 表达风格 +{expression_style} +## 当前状态 +- 当前时间:{current_time} +- 聊天对象:{chat_target} +- 私聊/群聊:{chat_type} +## 核心能力 +你可以通过启动不同的动作来增强你的能力。 +**重要:同一动作可以多次启动并行运行**,系统会用实例 ID 区分(如 reply#1, reply#2)。 +你可以同时启动多个不同的动作,它们会并行执行,互不阻塞。 +## 决策原则 +1. **优先使用动作**来获取信息或执行任务 +2. **可以同时启动多个动作**,例如同时发送多条回复、同时执行多个任务 +3. 如果动作正在进行中,可以通过 SEND 来引导其行为 +4. 适时使用 wait 动作来等待对方回复或收集更多信息 +5. reply 动作用于直接回复用户 +6. 
保持自然、有趣的对话风格 +{action_options} +""" + + +def build_main_thinking_prompt( + persona: Personality, + ctx: MindContext, + action_infos: list[dict], +) -> str: + """构建主思考系统提示词 + + 直接从 Personality 思想人格配置和 MindContext 读取所有参数, + 动作选项从 ActionExecutor.get_action_infos() 动态生成。 + + Args: + persona: Personality 人格配置 + ctx: MindContext 会话上下文 + action_infos: 动作元信息列表(来自 executor.get_action_infos()) + + Returns: + 完整的系统提示词 + """ + # 从 personality_config 提取人格特质和表达风格 + personality_config = persona.get("personality_config") or {} + traits = personality_config.get("traits", "") + expression_style = personality_config.get("expression_style", "") + + # 从 robot_config 提取机器人名称 + robot_config = persona.get("robot_config") or {} + bot_name = robot_config.get("nickname") or persona.get("name", "助手") + + # 系统提示词 + system_prompt = persona.get("prompt", "") + + # 心情(从上下文内存中获取) + mood = ctx.memory.get("current_mood", "平静") + + # 从上下文获取聊天信息 + chat_target = ctx.user_name or "用户" + chat_type = "私聊" if ctx.is_private else "群聊" + current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") + + # 动态构建动作选项 + action_options = build_action_options_prompt(action_infos) + + return MAIN_THINKING_SYSTEM_PROMPT.format( + bot_name=bot_name, + system_prompt=system_prompt or "你是一个助手", + personality_traits=traits or "善良、智能、有趣", + mood=mood, + expression_style=expression_style or "自然、友好", + current_time=current_time, + chat_target=chat_target, + chat_type=chat_type, + action_options=action_options, + ) + + +# ========== 动作选项提示词 ========== + +ACTION_OPTIONS_TEMPLATE = """ +## 可用动作 +{actions_description} +""" + + +def build_action_options_prompt(action_infos: list[dict]) -> str: + """从动作元信息列表动态构建可用动作提示词 + + 按 priority 降序排列,拼接 description 和 usage_guide。 + + Args: + action_infos: 动作元信息列表(来自 executor.get_action_infos()) + + Returns: + 动作选项提示词 + """ + if not action_infos: + return "暂无可用动作" + + # 已按 priority 降序排列(executor.get_action_infos 已排序) + lines = [] + for info in action_infos: + name = info["name"] + running_count 
= info.get("running_count", 0) + status = f"({running_count} 个实例运行中)" if running_count > 0 else "" + + lines.append(f"### {name} {status}") + if info["description"]: + lines.append(f"{info['description']}") + if info["usage_guide"]: + lines.append(f"使用指南:{info['usage_guide']}") + if info["fixed_prompt"] and running_count > 0: + lines.append(f"运行时提示:{info['fixed_prompt']}") + lines.append("") + + return ACTION_OPTIONS_TEMPLATE.format(actions_description="\n".join(lines)) + + +# ========== 动作实例状态提示词 ========== + +ACTION_STATES_TEMPLATE = """ +## 当前运行的动作实例 +{running_instances} +""" + + +def build_action_states_prompt(running_states: list[dict]) -> str: + """构建当前动作实例状态提示词 + + 基于 instance_id 展示每个运行中实例的状态。 + + Args: + running_states: 运行中实例状态列表(来自 executor.get_running_states()) + 每项包含: instance_id, action_name, state (ActionState) + + Returns: + 动作实例状态提示词 + """ + if not running_states: + return "" + + lines = [] + for item in running_states: + instance_id = item["instance_id"] + state = item["state"] + + lines.append(f"### {instance_id}") + + # 支持 ActionState 对象和 dict 两种格式 + if isinstance(state, dict): + status = state.get("status", "") + if status != "running": + continue + lines.append(f"状态:{status}") + progress = state.get("progress") + if progress: + lines.append(f"进度:{progress}") + prompt_contribution = state.get("prompt_contribution") + if prompt_contribution: + lines.append(f"详情:{prompt_contribution}") + data = state.get("data", {}) + if data: + key_data = {k: v for k, v in data.items() if not k.startswith("_")} + if key_data: + lines.append(f"数据:{key_data}") + else: + # ActionState 对象 + if state.status != "running": + continue + lines.append(f"状态:{state.status}") + if state.progress: + lines.append(f"进度:{state.progress}") + if state.prompt_contribution: + lines.append(f"详情:{state.prompt_contribution}") + if state.data: + key_data = { + k: v for k, v in state.data.items() if not k.startswith("_") + } + if key_data: + lines.append(f"数据:{key_data}") + + 
lines.append("") + + if not lines: + return "" + + return ACTION_STATES_TEMPLATE.format(running_instances="\n".join(lines)) + + +# ========== 临时提示词 ========== + +TEMP_PROMPTS_TEMPLATE = """ +## 临时提醒 +{prompts} +""" + + +def build_temp_prompts_section(temp_contents: list[str]) -> str: + """构建临时提示词段落 + + Args: + temp_contents: 临时提示词内容列表(来自 executor.tick_temp_prompts()) + + Returns: + 临时提示词段落 + """ + if not temp_contents: + return "" + + prompts = "\n".join(f"- {p}" for p in temp_contents) + return TEMP_PROMPTS_TEMPLATE.format(prompts=prompts) + + +# ========== 对话历史提示词 ========== + +HISTORY_TEMPLATE = """ +## 最近对话 +{chat_history} +""" + + +def build_history_prompt( + conversation_history: list[dict], + max_turns: int = 10, +) -> str: + """构建对话历史提示词(带日期时间) + + Args: + conversation_history: 对话历史列表 + max_turns: 最大轮数 + + Returns: + 对话历史提示词 + """ + if not conversation_history: + return "暂无对话历史" + + history = conversation_history[-max_turns:] + + lines = [] + for msg in history: + role = msg.get("role", "unknown") + content = msg.get("content", "") + sender = msg.get("sender_name", "") + timestamp = msg.get("timestamp") + + # 格式化时间 + time_str = "" + if timestamp: + try: + if isinstance(timestamp, (int, float)): + dt = datetime.fromtimestamp(timestamp) + elif isinstance(timestamp, str): + dt = datetime.fromisoformat(timestamp) + else: + dt = None + if dt: + time_str = f"[{dt.strftime('%Y-%m-%d %H:%M:%S')}] " + except (ValueError, OSError): + pass + + if role == "user": + prefix = f"{sender}: " if sender else "用户: " + elif role == "assistant": + prefix = "你: " + else: + prefix = f"{role}: " + + lines.append(f"{time_str}{prefix}{content}") + + return HISTORY_TEMPLATE.format(chat_history="\n".join(lines)) + + +# ========== 灵活提示词组装器 ========== + + +def build_prompt_sections( + *sections: str, + separator: str = "\n\n---\n\n", +) -> str: + """灵活组装多个提示词段落 + + 将传入的多个提示词段落用分隔符连接起来,自动过滤空段落。 + + Args: + *sections: 可变数量的提示词段落 + separator: 段落之间的分隔符,默认 "\n\n---\n\n" + + Returns: + 
组装好的完整提示词 + + Example: + >>> prompt = build_prompt_sections( + ... "## 系统提示\n你是助手", + ... "## 用户输入\n你好", + ... "## 决策格式\nSTART ...", + ... ) + """ + # 过滤空段落 + valid_sections = [s for s in sections if s and s.strip()] + return separator.join(valid_sections) diff --git a/astrbot/core/persona_mgr.py b/astrbot/core/persona_mgr.py index 6320ac3bbc..cbbc4bdcdb 100644 --- a/astrbot/core/persona_mgr.py +++ b/astrbot/core/persona_mgr.py @@ -14,6 +14,10 @@ tools=None, skills=None, custom_error_message=None, + personality_config=None, + chat_config=None, + robot_config=None, + is_advanced=False, _begin_dialogs_processed=[], _mood_imitation_dialogs_processed="", ) @@ -142,6 +146,11 @@ async def update_persona( tools: list[str] | None | object = NOT_GIVEN, skills: list[str] | None | object = NOT_GIVEN, custom_error_message: str | None | object = NOT_GIVEN, + personality_config: dict | None | object = NOT_GIVEN, + chat_config: dict | None | object = NOT_GIVEN, + robot_config: dict | None | object = NOT_GIVEN, + llm_model_config: dict | None | object = NOT_GIVEN, + is_advanced: bool | object = NOT_GIVEN, ): """更新指定 persona 的信息。tools 参数为 None 时表示使用所有工具,空列表表示不使用任何工具""" existing_persona = await self.db.get_persona_by_id(persona_id) @@ -154,6 +163,16 @@ async def update_persona( update_kwargs["skills"] = skills if custom_error_message is not NOT_GIVEN: update_kwargs["custom_error_message"] = custom_error_message + if personality_config is not NOT_GIVEN: + update_kwargs["personality_config"] = personality_config + if chat_config is not NOT_GIVEN: + update_kwargs["chat_config"] = chat_config + if robot_config is not NOT_GIVEN: + update_kwargs["robot_config"] = robot_config + if llm_model_config is not NOT_GIVEN: + update_kwargs["llm_model_config"] = llm_model_config + if is_advanced is not NOT_GIVEN: + update_kwargs["is_advanced"] = is_advanced persona = await self.db.update_persona( persona_id, @@ -322,6 +341,11 @@ async def create_persona( custom_error_message: str | None = None, 
folder_id: str | None = None, sort_order: int = 0, + personality_config: dict | None = None, + chat_config: dict | None = None, + robot_config: dict | None = None, + llm_model_config: dict | None = None, + is_advanced: bool = False, ) -> Persona: """创建新的 persona。 @@ -333,6 +357,11 @@ async def create_persona( skills: Skills 列表,None 表示使用所有 Skills,空列表表示不使用任何 Skills folder_id: 所属文件夹 ID,None 表示根目录 sort_order: 排序顺序 + personality_config: 高级人格配置 - 人格特质、表达风格、识别规则、心情标签等 + chat_config: 高级人格配置 - 聊天频率、动态频率、消息长度等 + robot_config: 高级人格配置 - 昵称、别名、平台等 + llm_model_config: 高级人格配置 - 模型配置(功能模型、回复模型、思考模型) + is_advanced: 是否为高级人格 """ if await self.db.get_persona_by_id(persona_id): raise ValueError(f"Persona with ID {persona_id} already exists.") @@ -345,6 +374,11 @@ async def create_persona( custom_error_message=custom_error_message, folder_id=folder_id, sort_order=sort_order, + personality_config=personality_config, + chat_config=chat_config, + robot_config=robot_config, + llm_model_config=llm_model_config, + is_advanced=is_advanced, ) self.personas.append(new_persona) self.get_v3_persona_data() @@ -370,6 +404,11 @@ def get_v3_persona_data( "tools": persona.tools, "skills": persona.skills, "custom_error_message": persona.custom_error_message, + "personality_config": persona.personality_config, + "chat_config": persona.chat_config, + "robot_config": persona.robot_config, + "is_advanced": persona.is_advanced, + "llm_model_config": persona.llm_model_config, } for persona in self.personas ] diff --git a/astrbot/core/pipeline/process_stage/method/agent_request.py b/astrbot/core/pipeline/process_stage/method/agent_request.py index 9efe538146..c1eddc26e8 100644 --- a/astrbot/core/pipeline/process_stage/method/agent_request.py +++ b/astrbot/core/pipeline/process_stage/method/agent_request.py @@ -7,6 +7,7 @@ from ...context import PipelineContext from ..stage import Stage from .agent_sub_stages.internal import InternalAgentSubStage +from .agent_sub_stages.internal_mind import InternalMindSubStage 
from .agent_sub_stages.third_party import ThirdPartyAgentSubStage @@ -27,9 +28,13 @@ async def initialize(self, ctx: PipelineContext) -> None: agent_runner_type = self.config["provider_settings"]["agent_runner_type"] if agent_runner_type == "local": self.agent_sub_stage = InternalAgentSubStage() + self.mind_sub_stage = InternalMindSubStage() else: self.agent_sub_stage = ThirdPartyAgentSubStage() + self.mind_sub_stage = None await self.agent_sub_stage.initialize(ctx) + if self.mind_sub_stage: + await self.mind_sub_stage.initialize(ctx) async def process(self, event: AstrMessageEvent) -> AsyncGenerator[None, None]: if not self.ctx.astrbot_config["provider_settings"]["enable"]: @@ -44,5 +49,16 @@ async def process(self, event: AstrMessageEvent) -> AsyncGenerator[None, None]: ) return - async for resp in self.agent_sub_stage.process(event, self.prov_wake_prefix): + # 根据是否为高级人格选择子阶段 + sub_stage = self.agent_sub_stage + if event.is_advanced_persona and self.mind_sub_stage: + logger.debug( + f"会话 {event.unified_msg_origin} 使用高级人格,使用 InternalMindSubStage" + ) + sub_stage = self.mind_sub_stage + + # 将事件和提供商唤醒前缀传递给代理子阶段处理 + # 异步生成所有响应 + async for resp in sub_stage.process(event, self.prov_wake_prefix): + # 生成每个响应 yield resp diff --git a/astrbot/core/pipeline/process_stage/method/agent_sub_stages/internal_mind.py b/astrbot/core/pipeline/process_stage/method/agent_sub_stages/internal_mind.py new file mode 100644 index 0000000000..b8d82658fc --- /dev/null +++ b/astrbot/core/pipeline/process_stage/method/agent_sub_stages/internal_mind.py @@ -0,0 +1,394 @@ +"""高级人格 MindSim 子阶段 + +作为 AstrMessageEvent 和 MindSim 之间的桥梁(适配器),负责: +1. 从 AstrMessageEvent 提取信息,构建 MindContext +2. 调用 factory 启动 MindSim 并获取事件流 +3. 监听 MindEvent 事件流,将回复发送到消息平台 +4. 
不控制事件生命周期(由主思考 Brain 决定何时结束) + +职责划分: +- internal_mind:管理 Brain 生命周期、监听事件流 +- ReplyAction:生成回复、发送消息、保存 AI 回复到历史 +- MemoryManager:不在此阶段管理(由其他模块处理) +""" + +import json +from collections.abc import AsyncGenerator + +from astrbot.core import logger +from astrbot.core.message.components import ( + BaseMessageComponent, + Face, + File, + Image, + Plain, + Reply, + WechatEmoji, +) +from astrbot.core.message.message_event_result import MessageChain +from astrbot.core.mind_sim import MindContext +from astrbot.core.mind_sim.dispatcher import PrivateBrainFactory +from astrbot.core.mind_sim.messages import MindEventType +from astrbot.core.pipeline.stage import Stage +from astrbot.core.platform.astr_message_event import AstrMessageEvent + +from ....context import PipelineContext + + +async def _get_or_create_conversation(event: AstrMessageEvent, conv_manager): + """获取或创建当前会话的对话""" + # 先尝试获取当前对话ID + cid = await conv_manager.get_curr_conversation_id(event.unified_msg_origin) + if cid: + conversation = await conv_manager.get_conversation( + event.unified_msg_origin, cid + ) + if conversation: + return conversation + + # 如果没有当前对话,创建新的 + cid = await conv_manager.new_conversation( + event.unified_msg_origin, event.get_platform_id() + ) + conversation = await conv_manager.get_conversation(event.unified_msg_origin, cid) + if not conversation: + raise RuntimeError("无法创建新的对话。") + return conversation + + +class InternalMindSubStage(Stage): + """高级人格 MindSim 子阶段""" + + async def initialize(self, ctx: PipelineContext) -> None: + self.ctx = ctx + self.conv_manager = ctx.plugin_manager.context.conversation_manager + # 每个 InternalMindSubStage 实例持有一个 BrainFactory + # 不再使用全局单例 + self._brain_factory = PrivateBrainFactory() + # 图片描述缓存:{event_id: caption_text},避免重复描述同一张图片 + self._image_caption_cache: dict[str, str] = {} + + async def process( + self, event: AstrMessageEvent, provider_wake_prefix: str + ) -> AsyncGenerator[None, None]: + """处理高级人格事件 + + 流程: + 1. 获取或创建对话 + 2. 
预处理消息(图片转描述、表情描述、文件描述) + 3. 保存用户消息到历史(包含完整的媒体描述) + 4. 获取/创建 Brain 实例(通过 PrivateBrainFactory) + 5. 监听事件流,每条回复发送并保存 + """ + # 1. 获取或创建对话 + conversation = await _get_or_create_conversation(event, self.conv_manager) + conversation_id = conversation.cid + + # 2. 获取高级人格配置(必须先于预处理,因为预处理需要人格的图片描述模型配置) + persona = await self._resolve_persona(event) + + # 3. 预处理消息:提取图片/表情/文件,生成文本描述(传入 persona 以读取图片描述模型) + processed_message = await self._preprocess_message(event, persona) + + # 4. 保存用户消息到历史 + await self._save_user_message(event, conversation_id, processed_message) + + # 5. 构建 MindContext + mind_ctx = self._build_mind_context(event, conversation_id, persona) + + # 6. 启动事件流,ReplyAction 通过 event.send() 直接发送回复 + # dispatch() 内部已处理活跃事件流的消息投递 + # 使用预处理后的消息(包含图片描述等)替代原始 message_str + event_stream = self._brain_factory.dispatch( + ctx=mind_ctx, + message=processed_message, + sender_id=event.get_sender_id(), + sender_name=event.get_sender_name(), + persona=persona, + ) + + async for mind_event in event_stream: + if mind_event.type == MindEventType.TYPING: + await event.send_typing() + elif mind_event.type == MindEventType.ERROR: + error_msg = mind_event.data.get("message", "思考出错") + logger.error(f"[InternalMindSubStage] MindSim 错误: {error_msg}") + await event.send(MessageChain([Plain(f"[错误] {error_msg}")])) + elif mind_event.type == MindEventType.PIPELINE_YIELD: + # AgentMindSubStage 请求 pipeline yield + # event.result 已由 AgentMindSubStage 设置好 + done_event = mind_event.data.get("done_event") + logger.debug( + "[InternalMindSubStage] 收到 PIPELINE_YIELD,yield 给 pipeline" + ) + yield # 传递给 pipeline 框架,RespondStage 处理 event.result + # pipeline yield 返回后,通知 AgentMindSubStage 继续 + if done_event: + done_event.set() + elif mind_event.type == MindEventType.END: + logger.debug("[InternalMindSubStage] 收到 END 事件,思考结束") + break + + return + yield # noqa: 使函数保持 AsyncGenerator 类型 + + def _build_mind_context( + self, + event: AstrMessageEvent, + conversation_id: str, + persona: dict, + ) -> 
MindContext: + """从 AstrMessageEvent 构建 MindContext""" + plugin_context = self.ctx.plugin_manager.context + return MindContext( + session_id=str(event.session), + unified_msg_origin=event.unified_msg_origin, + is_private=event.is_private_chat(), + persona_id=getattr(event, "_persona_id", "default"), + system_prompt=persona.get("prompt", ""), + personality_config=persona.get("personality_config", {}), + chat_config=persona.get("chat_config", {}), + robot_config=persona.get("robot_config", {}), + user_id=event.get_sender_id(), + user_name=event.get_sender_name(), + conv_manager=self.conv_manager, + conversation_id=conversation_id, + event=event, + plugin_context=plugin_context, + ) + + async def _resolve_persona(self, event: AstrMessageEvent) -> dict: + """从 PersonaManager 解析当前会话的高级人格配置 + + resolve_selected_persona 返回的是 Personality TypedDict(本质是 dict), + 可以直接用 .get() 访问嵌套字段。 + """ + try: + plugin_context = self.ctx.plugin_manager.context + persona_manager = plugin_context.persona_manager + cfg = plugin_context.get_config(event.unified_msg_origin) + provider_settings = cfg.get("provider_settings", {}) + + persona_id, persona, _, _ = await persona_manager.resolve_selected_persona( + umo=event.unified_msg_origin, + conversation_persona_id=getattr(event, "_persona_id", None), + platform_name=event.get_platform_name(), + provider_settings=provider_settings, + ) + # Persona 是 Personality TypedDict(dict 的别名),直接返回 + return persona or {} + except Exception as e: + logger.warning(f"[InternalMindSubStage] 解析人格配置失败: {e}") + return {} + + async def _save_user_message( + self, event: AstrMessageEvent, conversation_id: str, processed_message: str + ) -> None: + """保存用户消息到历史 + + Args: + event: 消息事件 + conversation_id: 对话 ID + processed_message: 预处理后的消息文本(包含媒体描述) + """ + try: + conversation = await self.conv_manager.get_conversation( + event.unified_msg_origin, conversation_id + ) + history = ( + json.loads(conversation.history) + if conversation and conversation.history + else [] + ) 
+ # 保存预处理后的消息,而非原始 message_str(保留了图片/表情描述) + history.append({"role": "user", "content": processed_message}) + await self.conv_manager.update_conversation( + event.unified_msg_origin, + conversation_id, + history=history, + ) + except Exception as e: + logger.warning(f"[InternalMindSubStage] 保存用户消息失败: {e}") + + async def _preprocess_message(self, event: AstrMessageEvent, persona: dict) -> str: + """预处理用户消息,提取并描述图片、表情、文件等媒体 + + Args: + event: 消息事件 + persona: 高级人格配置(用于读取图片描述模型配置) + + Returns: + 预处理后的完整消息文本 + """ + parts: list[str] = [] + + # 获取基础文本(已去除唤醒前缀) + base_text = event.message_str.strip() + if base_text: + parts.append(base_text) + + # 获取消息链 + message_chain = getattr(event.message_obj, "message", []) + if not message_chain: + return event.message_str + + # 遍历消息组件,提取媒体描述(传入 persona 以读取图片描述模型) + media_descriptions = await self._extract_media_descriptions( + event, message_chain, persona + ) + parts.extend(media_descriptions) + + return "\n".join(parts).strip() or event.message_str + + async def _extract_media_descriptions( + self, + event: AstrMessageEvent, + components: list[BaseMessageComponent], + persona: dict, + ) -> list[str]: + """从消息组件中提取媒体描述 + + Args: + event: 消息事件 + components: 消息组件列表 + persona: 高级人格配置 + + Returns: + 媒体描述文本列表 + """ + descriptions: list[str] = [] + image_paths: list[str] = [] + + for comp in components: + if isinstance(comp, Plain): + # Plain 文本已通过 message_str 处理,跳过避免重复 + continue + elif isinstance(comp, Image): + try: + image_path = await comp.convert_to_file_path() + image_paths.append(image_path) + except Exception as e: + logger.warning(f"[InternalMindSubStage] 转换图片失败: {e}") + descriptions.append("[图片(无法读取)]") + elif isinstance(comp, Face): + # QQ 表情 ID 转为描述 + descriptions.append(f"[QQ表情: {comp.id}]") + elif isinstance(comp, WechatEmoji): + # 微信表情描述 + emoji_desc = self._describe_wechat_emoji(comp) + descriptions.append(f"[微信表情: {emoji_desc}]") + elif isinstance(comp, File): + # 文件描述 + file_name = getattr(comp, "name", None) or 
getattr( + comp, "file", "未知文件" + ) + file_size = getattr(comp, "size", None) + size_str = f" ({file_size} bytes)" if file_size else "" + descriptions.append(f"[文件: {file_name}{size_str}]") + elif isinstance(comp, Reply): + # 处理引用消息中的媒体 + if comp.chain: + chain_descs = await self._extract_media_descriptions( + event, comp.chain, persona + ) + descriptions.extend(chain_descs) + # 引用消息的文本内容已通过 message_str 处理 + + # 批量处理图片描述(避免多次调用 LLM) + if image_paths: + try: + caption_text = await self._describe_images(event, image_paths, persona) + if caption_text: + descriptions.append( + f"{caption_text}" + ) + except Exception as e: + logger.warning(f"[InternalMindSubStage] 图片描述失败: {e}") + descriptions.append(f"[图片 x{len(image_paths)}]") + + return descriptions + + def _describe_wechat_emoji(self, emoji: WechatEmoji) -> str: + """生成微信表情的文字描述""" + # 优先使用 md5 作为标识 + md5 = getattr(emoji, "md5", None) + if md5: + return f"微信表情包表情 (md5={md5[:8]}...)" + cdnurl = getattr(emoji, "cdnurl", None) + if cdnurl: + return f"微信表情包表情 (url={cdnurl[:50]}...)" + return "微信表情包表情" + + async def _describe_images( + self, event: AstrMessageEvent, image_paths: list[str], persona: dict + ) -> str: + """通过 LLM 生成图片描述 + + 优先级:人格配置的 image_caption_model → 全局配置 → 默认正在使用的提供商。 + 参考 AgentMindSubStage 的模型注册模式。 + + Args: + event: 消息事件 + image_paths: 图片本地路径列表 + persona: 高级人格配置 + + Returns: + 图片描述文本,失败时返回空字符串 + """ + plugin_context = self.ctx.plugin_manager.context + + # 1. 尝试获取人格配置的图片描述模型 + llm_config = persona.get("llm_model_config", {}) + img_caption_config = llm_config.get("image_caption_model", {}) or {} + + provider_id = img_caption_config.get("provider_id", "") + model = img_caption_config.get("model", "") + prompt = img_caption_config.get( + "prompt", "请简洁描述这张图片的内容,用一句话概括。" + ) + + # 2. 
如果人格未配置,回退到全局配置 + if not provider_id or not model: + cfg = plugin_context.get_config(event.unified_msg_origin) + provider_settings = cfg.get("provider_settings", {}) + provider_id = provider_settings.get("default_image_caption_provider_id", "") + prompt = provider_settings.get( + "image_caption_prompt", "请简洁描述这张图片的内容,用一句话概括。" + ) + + # 3. 如果仍未找到 provider_id,使用默认的正在使用的提供商 + if not provider_id: + prov = plugin_context.get_using_provider(event.unified_msg_origin) + if prov: + provider_id = prov.provider_config.get("id", "") + # 如果人格也没配置 model,则用 provider 的默认 model + if not model: + model = prov.get_model() + else: + logger.warning("[InternalMindSubStage] 未找到可用的图片描述模型") + return "" + + # 4. 获取 Provider 实例 + provider = plugin_context.get_provider_by_id(provider_id) + if not provider: + logger.warning( + f"[InternalMindSubStage] 图片描述 Provider 不存在: {provider_id}" + ) + return "" + + # 5. 调用 LLM 生成描述 + try: + logger.debug( + f"[InternalMindSubStage] 生成图片描述,使用 provider={provider_id}, model={model}" + ) + llm_resp = await provider.text_chat( + prompt=prompt, + image_urls=image_paths, + ) + caption = llm_resp.completion_text or "" + if caption: + logger.debug(f"[InternalMindSubStage] 图片描述结果: {caption[:100]}") + return caption + except Exception as e: + logger.error(f"[InternalMindSubStage] LLM 图片描述调用失败: {e}") + return "" diff --git a/astrbot/core/pipeline/waking_check/stage.py b/astrbot/core/pipeline/waking_check/stage.py index 2dcb840e91..14244707a9 100644 --- a/astrbot/core/pipeline/waking_check/stage.py +++ b/astrbot/core/pipeline/waking_check/stage.py @@ -14,6 +14,43 @@ from ..context import PipelineContext from ..stage import Stage, register_stage + +async def _check_is_advanced_persona( + ctx: PipelineContext, + event: AstrMessageEvent, +) -> bool: + """检查当前会话是否使用高级人格。 + + 高级人格具有自主思考、主动发言等能力,群聊时不需要唤醒词。 + """ + try: + persona_manager = ctx.plugin_manager.context.persona_manager + provider_settings = ctx.astrbot_config.get("provider_settings", {}) + + # 解析当前会话使用的人格 + ( + 
_persona_id, + persona, + _force_applied, + _use_webchat_special, + ) = await persona_manager.resolve_selected_persona( + umo=event.session, + conversation_persona_id=None, + platform_name=event.get_platform_name(), + provider_settings=provider_settings, + ) + + if persona and persona.get("is_advanced", False): + logger.debug( + f"会话 {event.unified_msg_origin} 使用高级人格 {persona.get('name')},跳过唤醒词检查" + ) + return True + except Exception as e: + logger.debug(f"检查高级人格时出错: {e}") + + return False + + UNIQUE_SESSION_ID_BUILDERS: dict[str, Callable[[AstrMessageEvent], str | None]] = { "aiocqhttp": lambda e: f"{e.get_sender_id()}_{e.get_group_id()}", "slack": lambda e: f"{e.get_sender_id()}_{e.get_group_id()}", @@ -98,10 +135,19 @@ async def process( event.role = "admin" break + # 检查是否是高级人格 - 高级人格在群聊时也不需要唤醒词 + is_advanced_persona = await _check_is_advanced_persona(self.ctx, event) + if is_advanced_persona: + event.is_advanced_persona = True + event.is_wake = True + event.is_at_or_wake_command = True + logger.debug( + f"高级人格模式激活,会话 {event.unified_msg_origin} 无需唤醒词" + ) # 检查 wake wake_prefixes = self.ctx.astrbot_config["wake_prefix"] messages = event.get_messages() - is_wake = False + is_wake = is_advanced_persona # 高级人格已唤醒 for wake_prefix in wake_prefixes: if event.message_str.startswith(wake_prefix): if ( diff --git a/astrbot/core/pipeline/whitelist_check/stage.py b/astrbot/core/pipeline/whitelist_check/stage.py index ea9c55228e..f0bc1fbce2 100644 --- a/astrbot/core/pipeline/whitelist_check/stage.py +++ b/astrbot/core/pipeline/whitelist_check/stage.py @@ -36,9 +36,9 @@ async def process( # 白名单检查未启用 return - if len(self.whitelist) == 0: - # 白名单为空,不检查 - return + # if len(self.whitelist) == 0: + # 白名单为空,不检查,只要启动白名单就要检查 + # return if event.get_platform_name() == "webchat": # WebChat 豁免 diff --git a/astrbot/core/platform/astr_message_event.py b/astrbot/core/platform/astr_message_event.py index 82c03dbb0d..39741946d4 100644 --- a/astrbot/core/platform/astr_message_event.py +++ 
b/astrbot/core/platform/astr_message_event.py @@ -95,6 +95,9 @@ def __init__( self.plugins_name: list[str] | None = None """该事件启用的插件名称列表。None 表示所有插件都启用。空列表表示没有启用任何插件。""" + self.is_advanced_persona: bool = False + """是否使用高级人格。高级人格具有自主思考、主动发言等能力。""" + # back_compability self.platform = platform_meta diff --git a/astrbot/core/platform/sources/webchat/webchat_event.py b/astrbot/core/platform/sources/webchat/webchat_event.py index bc1e1a6bcd..08aa4605c2 100644 --- a/astrbot/core/platform/sources/webchat/webchat_event.py +++ b/astrbot/core/platform/sources/webchat/webchat_event.py @@ -181,17 +181,21 @@ async def send_streaming(self, generator, use_fallback: bool = False) -> None: await web_chat_back_queue.put(payload) continue - # if chain.type == "break" and final_data: - # # 分割符 - # await web_chat_back_queue.put( - # { - # "type": "break", # break means a segment end - # "data": final_data, - # "streaming": True, - # }, - # ) - # final_data = "" - # continue + # 处理 break 信号(高级人格多条回复分隔符) + if chain.type == "break" and final_data: + # 发送 break 信号,让前端知道这是一条独立的消息 + await web_chat_back_queue.put( + { + "type": "break", # break means a segment end + "data": final_data, + "reasoning": reasoning_content, + "streaming": True, + "message_id": message_id, + }, + ) + final_data = "" + reasoning_content = "" + continue r = await WebChatMessageEvent._send( message_id=message_id, diff --git a/astrbot/dashboard/routes/persona.py b/astrbot/dashboard/routes/persona.py index 56c14fe617..35472436bb 100644 --- a/astrbot/dashboard/routes/persona.py +++ b/astrbot/dashboard/routes/persona.py @@ -61,6 +61,11 @@ async def list_personas(self): "custom_error_message": persona.custom_error_message, "folder_id": persona.folder_id, "sort_order": persona.sort_order, + "personality_config": persona.personality_config, + "chat_config": persona.chat_config, + "robot_config": persona.robot_config, + "llm_model_config": persona.llm_model_config, + "is_advanced": persona.is_advanced, "created_at": 
persona.created_at.isoformat() if persona.created_at else None, @@ -102,6 +107,11 @@ async def get_persona_detail(self): "custom_error_message": persona.custom_error_message, "folder_id": persona.folder_id, "sort_order": persona.sort_order, + "personality_config": persona.personality_config, + "chat_config": persona.chat_config, + "robot_config": persona.robot_config, + "llm_model_config": persona.llm_model_config, + "is_advanced": persona.is_advanced, "created_at": persona.created_at.isoformat() if persona.created_at else None, @@ -128,6 +138,12 @@ async def create_persona(self): custom_error_message = data.get("custom_error_message") folder_id = data.get("folder_id") # None 表示根目录 sort_order = data.get("sort_order", 0) + # 高级人格配置 + personality_config = data.get("personality_config") + chat_config = data.get("chat_config") + robot_config = data.get("robot_config") + llm_model_config = data.get("llm_model_config") + is_advanced = data.get("is_advanced", False) if not persona_id: return Response().error("人格ID不能为空").__dict__ @@ -157,6 +173,11 @@ async def create_persona(self): custom_error_message=custom_error_message, folder_id=folder_id, sort_order=sort_order, + personality_config=personality_config, + chat_config=chat_config, + robot_config=robot_config, + llm_model_config=llm_model_config, + is_advanced=is_advanced, ) return ( @@ -173,6 +194,11 @@ async def create_persona(self): "custom_error_message": persona.custom_error_message, "folder_id": persona.folder_id, "sort_order": persona.sort_order, + "personality_config": persona.personality_config, + "chat_config": persona.chat_config, + "robot_config": persona.robot_config, + "llm_model_config": persona.llm_model_config, + "is_advanced": persona.is_advanced, "created_at": persona.created_at.isoformat() if persona.created_at else None, @@ -203,6 +229,17 @@ async def update_persona(self): skills = data.get("skills") has_custom_error_message = "custom_error_message" in data custom_error_message = 
data.get("custom_error_message") + # 高级人格配置 + has_personality_config = "personality_config" in data + personality_config = data.get("personality_config") + has_chat_config = "chat_config" in data + chat_config = data.get("chat_config") + has_robot_config = "robot_config" in data + robot_config = data.get("robot_config") + has_llm_model_config = "llm_model_config" in data + llm_model_config = data.get("llm_model_config") + has_is_advanced = "is_advanced" in data + is_advanced = data.get("is_advanced") if not persona_id: return Response().error("缺少必要参数: persona_id").__dict__ @@ -234,6 +271,16 @@ async def update_persona(self): update_kwargs["skills"] = skills if has_custom_error_message: update_kwargs["custom_error_message"] = custom_error_message + if has_personality_config: + update_kwargs["personality_config"] = personality_config + if has_chat_config: + update_kwargs["chat_config"] = chat_config + if has_robot_config: + update_kwargs["robot_config"] = robot_config + if has_llm_model_config: + update_kwargs["llm_model_config"] = llm_model_config + if has_is_advanced: + update_kwargs["is_advanced"] = is_advanced await self.persona_mgr.update_persona(**update_kwargs) diff --git a/dashboard/src/components/shared/AdvancedPersonaPage.vue b/dashboard/src/components/shared/AdvancedPersonaPage.vue new file mode 100644 index 0000000000..3ce76e4652 --- /dev/null +++ b/dashboard/src/components/shared/AdvancedPersonaPage.vue @@ -0,0 +1,787 @@ + + + + + diff --git a/dashboard/src/components/shared/advanced/ChatSection.vue b/dashboard/src/components/shared/advanced/ChatSection.vue new file mode 100644 index 0000000000..c675bed95c --- /dev/null +++ b/dashboard/src/components/shared/advanced/ChatSection.vue @@ -0,0 +1,120 @@ + + + diff --git a/dashboard/src/components/shared/advanced/DialogsSection.vue b/dashboard/src/components/shared/advanced/DialogsSection.vue new file mode 100644 index 0000000000..2f64f59db3 --- /dev/null +++ 
b/dashboard/src/components/shared/advanced/DialogsSection.vue @@ -0,0 +1,87 @@ + + + diff --git a/dashboard/src/components/shared/advanced/ModelConfigSection.vue b/dashboard/src/components/shared/advanced/ModelConfigSection.vue new file mode 100644 index 0000000000..cab01cb149 --- /dev/null +++ b/dashboard/src/components/shared/advanced/ModelConfigSection.vue @@ -0,0 +1,300 @@ + + + diff --git a/dashboard/src/components/shared/advanced/ModelSelector.vue b/dashboard/src/components/shared/advanced/ModelSelector.vue new file mode 100644 index 0000000000..9ecf717c2b --- /dev/null +++ b/dashboard/src/components/shared/advanced/ModelSelector.vue @@ -0,0 +1,275 @@ + + + + + diff --git a/dashboard/src/components/shared/advanced/PersonalitySection.vue b/dashboard/src/components/shared/advanced/PersonalitySection.vue new file mode 100644 index 0000000000..53e96de30f --- /dev/null +++ b/dashboard/src/components/shared/advanced/PersonalitySection.vue @@ -0,0 +1,157 @@ + + + diff --git a/dashboard/src/components/shared/advanced/RobotSection.vue b/dashboard/src/components/shared/advanced/RobotSection.vue new file mode 100644 index 0000000000..80b2e3c215 --- /dev/null +++ b/dashboard/src/components/shared/advanced/RobotSection.vue @@ -0,0 +1,127 @@ + + + diff --git a/dashboard/src/i18n/locales/en-US/features/persona.json b/dashboard/src/i18n/locales/en-US/features/persona.json index 84aaef52c6..b2daa81852 100644 --- a/dashboard/src/i18n/locales/en-US/features/persona.json +++ b/dashboard/src/i18n/locales/en-US/features/persona.json @@ -4,6 +4,7 @@ }, "buttons": { "create": "Create Persona", + "createAdvanced": "Advanced Persona (Beta)", "createFirst": "Create First Persona", "edit": "Edit", "delete": "Delete", @@ -142,5 +143,99 @@ "description": "Select a destination folder for \"{name}\"", "success": "Moved successfully", "error": "Failed to move" + }, + "advancedPersona": { + "back": "Back", + "createTitle": "Create Advanced Persona", + "editTitle": "Edit Advanced Persona", + 
"subtitle": "Configure advanced persona settings", + "betaTag": "Beta", + "basicInfo": "Basic Info", + "fillDefault": "Fill MaiBot Default Config", + "form": { + "personaId": "Persona ID", + "systemPrompt": "System Prompt", + "customErrorMessage": "Custom Error Reply Message" + }, + "tools": { + "title": "Tool Selection", + "all": "Use All Tools", + "select": "Select Specific Tools", + "search": "Search Tools" + }, + "skills": { + "title": "Skills Selection", + "all": "Use All Skills", + "select": "Select Specific Skills", + "search": "Search Skills", + "noSkillsAvailable": "No skills available", + "noSkillsFound": "No matching skills found", + "loading": "Loading skills..." + }, + "tabs": { + "personality": "Personality", + "chat": "Chat Settings", + "robot": "Robot Account", + "dialogs": "Preset Dialogs" + }, + "personality": { + "traits": "Personality Traits", + "traitsHint": "Describe personality traits, e.g., gentle, introverted, lively", + "expressionStyle": "Expression Style", + "expressionStyleHint": "Describe the bot's speaking style, e.g., concise, humorous, formal", + "recognitionRules": "Recognition Rules", + "recognitionRulesHint": "Define image recognition and processing rules", + "moodTags": "Mood Tags", + "moodTagsHint": "Set mood tags and their usage weights, total should be 100%", + "addMoodTag": "Add Tag", + "tagName": "Tag Name", + "tagWeight": "Weight", + "totalWeight": "Total Weight", + "weightWarning": "Total weight should be 100%" + }, + "chat": { + "chatFrequency": "Chat Frequency", + "chatFrequencyHint": "Control how often the bot initiates chats", + "dynamicFrequency": "Dynamic Response Frequency", + "dynamicFrequencyHint": "Dynamically adjust response frequency based on context", + "timeBasedMode": "Time-based Mode", + "timeBasedModeHint": "Select different response strategies based on current time", + "messageLength": "Message Length", + "messageLengthHint": "Control the length of generated messages", + "frequencyOptions": { + "silent": 
"Silent", + "low": "Low", + "normal": "Normal", + "high": "High", + "veryHigh": "Very High" + }, + "dynamicOptions": { + "auto": "Auto", + "fixed": "Fixed", + "random": "Random" + }, + "lengthOptions": { + "short": "Short", + "normal": "Normal", + "long": "Long", + "adaptive": "Adaptive" + } + }, + "robot": { + "nickname": "Nickname", + "nicknameHint": "Bot's display nickname", + "aliases": "Aliases", + "aliasesHint": "Other names for the bot, separated by enter", + "addAlias": "Add Alias", + "alias": "Alias", + "platforms": "Enabled Platforms", + "platformsHint": "Select platforms the bot should respond to" + }, + "dialogs": { + "help": "Add preset dialogs to help the bot better understand the role settings", + "userMessage": "User Message", + "assistantMessage": "Assistant Message", + "addPair": "Add Dialog Pair" + } } } diff --git a/dashboard/src/i18n/locales/zh-CN/features/persona.json b/dashboard/src/i18n/locales/zh-CN/features/persona.json index d3eec49a57..6b2a044e5b 100644 --- a/dashboard/src/i18n/locales/zh-CN/features/persona.json +++ b/dashboard/src/i18n/locales/zh-CN/features/persona.json @@ -4,6 +4,7 @@ }, "buttons": { "create": "创建人格", + "createAdvanced": "思想人格", "createFirst": "创建第一个人格", "edit": "编辑", "delete": "删除", @@ -142,5 +143,99 @@ "description": "为 \"{name}\" 选择目标文件夹", "success": "移动成功", "error": "移动失败" + }, + "advancedPersona": { + "back": "返回", + "createTitle": "思想人格", + "editTitle": "编辑思想人格", + "subtitle": "配置思想人格的详细设置", + "betaTag": "测试版", + "basicInfo": "基础信息", + "fillDefault": "填充麦麦默认配置", + "form": { + "personaId": "人格 ID", + "systemPrompt": "系统提示词", + "customErrorMessage": "自定义报错回复信息" + }, + "tools": { + "title": "工具选择", + "all": "使用全部工具", + "select": "选择指定工具", + "search": "搜索工具" + }, + "skills": { + "title": "Skills 选择", + "all": "使用全部 Skills", + "select": "选择指定 Skills", + "search": "搜索 Skills", + "noSkillsAvailable": "暂无可用的 Skills", + "noSkillsFound": "未找到匹配的 Skills", + "loading": "加载 Skills 中..." 
+ }, + "tabs": { + "personality": "人格设置", + "chat": "聊天设置", + "robot": "机器人账号", + "dialogs": "预设对话" + }, + "personality": { + "traits": "人格特质", + "traitsHint": "描述人格的性格特点,如:温柔、内向、活泼等", + "expressionStyle": "表达风格", + "expressionStyleHint": "描述机器人的说话风格,如:简洁、幽默、正式等", + "recognitionRules": "识别规则", + "recognitionRulesHint": "定义图片识别和处理的规则", + "moodTags": "心情标签", + "moodTagsHint": "设置心情标签及其使用权重,总和应为100%", + "addMoodTag": "添加标签", + "tagName": "标签名", + "tagWeight": "权重", + "totalWeight": "总权重", + "weightWarning": "权重总和应为100%" + }, + "chat": { + "chatFrequency": "聊天频率", + "chatFrequencyHint": "控制机器人主动聊天的频率", + "dynamicFrequency": "动态发言频率", + "dynamicFrequencyHint": "根据上下文动态调整发言频率", + "timeBasedMode": "根据时间选择", + "timeBasedModeHint": "根据当前时间选择不同的回复策略", + "messageLength": "消息条数长度", + "messageLengthHint": "控制生成消息的长度", + "frequencyOptions": { + "silent": "沉默", + "low": "低", + "normal": "正常", + "high": "高", + "veryHigh": "非常高" + }, + "dynamicOptions": { + "auto": "自动", + "fixed": "固定", + "random": "随机" + }, + "lengthOptions": { + "short": "短", + "normal": "正常", + "long": "长", + "adaptive": "自适应" + } + }, + "robot": { + "nickname": "昵称", + "nicknameHint": "机器人的显示昵称", + "aliases": "别名", + "aliasesHint": "机器人的其他称呼,多个用回车分隔", + "addAlias": "添加别名", + "alias": "别名", + "platforms": "启用的平台", + "platformsHint": "选择机器人需要响应的平台" + }, + "dialogs": { + "help": "添加预设对话来帮助机器人更好地理解角色设定", + "userMessage": "用户消息", + "assistantMessage": "AI 回答", + "addPair": "添加对话对" + } } } diff --git a/dashboard/src/router/MainRoutes.ts b/dashboard/src/router/MainRoutes.ts index 109122dd86..378f7b496f 100644 --- a/dashboard/src/router/MainRoutes.ts +++ b/dashboard/src/router/MainRoutes.ts @@ -71,6 +71,17 @@ const MainRoutes = { path: '/persona', component: () => import('@/views/PersonaPage.vue') }, + { + name: 'AdvancedPersona', + path: '/persona/advanced', + component: () => import('@/components/shared/AdvancedPersonaPage.vue') + }, + { + name: 'AdvancedPersonaEdit', + path: '/persona/advanced/:personaId', + 
component: () => import('@/components/shared/AdvancedPersonaPage.vue'), + props: true + }, { name: 'SubAgent', path: '/subagent', diff --git a/dashboard/src/views/persona/PersonaManager.vue b/dashboard/src/views/persona/PersonaManager.vue index 8ad581779f..07806642d4 100644 --- a/dashboard/src/views/persona/PersonaManager.vue +++ b/dashboard/src/views/persona/PersonaManager.vue @@ -32,6 +32,10 @@ rounded="lg"> {{ tm('buttons.create') }} + + {{ tm('buttons.createAdvanced') }} + {{ tm('folder.createButton') }} @@ -421,7 +425,15 @@ export default defineComponent({ this.showPersonaDialog = true; }, + openCreateAdvancedPersonaDialog() { + this.$router.push('/persona/advanced'); + }, + editPersona(persona: Persona) { + if (persona.is_advanced) { + this.$router.push(`/persona/advanced/${encodeURIComponent(persona.persona_id)}`); + return; + } this.editingPersona = persona; this.showPersonaDialog = true; }, @@ -433,6 +445,11 @@ export default defineComponent({ openEditFromViewDialog() { if (!this.viewingPersona) return; + if (this.viewingPersona.is_advanced) { + this.showViewDialog = false; + this.$router.push(`/persona/advanced/${encodeURIComponent(this.viewingPersona.persona_id)}`); + return; + } this.editingPersona = this.viewingPersona; this.showViewDialog = false; this.showPersonaDialog = true;