diff --git a/src/google/adk/evaluation/eval_config.py b/src/google/adk/evaluation/eval_config.py
index 185f02cc20..ead2303ceb 100644
--- a/src/google/adk/evaluation/eval_config.py
+++ b/src/google/adk/evaluation/eval_config.py
@@ -89,7 +89,7 @@ class EvalConfig(BaseModel):
   In the sample below, `tool_trajectory_avg_score`, `response_match_score` and
   `final_response_match_v2` are the standard eval metric names, represented as
   keys in the dictionary. The values in the dictionary are the corresponding
-  criterions. For the first two metrics, we use simple threshold as the criterion,
+  criteria. For the first two metrics, we use a simple threshold as the criterion,
   the third one uses `LlmAsAJudgeCriterion`.
   {
     "criteria": {
diff --git a/src/google/adk/evaluation/simulation/per_turn_user_simulator_quality_v1.py b/src/google/adk/evaluation/simulation/per_turn_user_simulator_quality_v1.py
index 5d6208a10a..4cf2381b86 100644
--- a/src/google/adk/evaluation/simulation/per_turn_user_simulator_quality_v1.py
+++ b/src/google/adk/evaluation/simulation/per_turn_user_simulator_quality_v1.py
@@ -58,7 +58,7 @@

 # Definition of Conversation History
 The Conversation History is the actual dialogue between the User Simulator and the Agent.
-The Conversation History may not be complete, but the exsisting dialogue should adhere to the Conversation Plan.
+The Conversation History may not be complete, but the existing dialogue should adhere to the Conversation Plan.
 The Conversation History may contain instances where the User Simulator troubleshoots an incorrect/inappropriate response from the Agent in order to enforce the Conversation Plan.
 The Conversation History is finished only when the User Simulator outputs `{stop_signal}` in its response.
 If this token is missing, the conversation between the User Simulator and the Agent has not finished, and more turns can be generated.
diff --git a/src/google/adk/flows/llm_flows/contents.py b/src/google/adk/flows/llm_flows/contents.py
index 046a443a25..1cc9f754d4 100644
--- a/src/google/adk/flows/llm_flows/contents.py
+++ b/src/google/adk/flows/llm_flows/contents.py
@@ -220,7 +220,7 @@ def _rearrange_events_for_latest_function_response(


 def _is_part_invisible(p: types.Part) -> bool:
-  """Returns whether a part is invisible for LLM context."""
+  """A part is considered invisible if it's a thought, or has no visible content."""
   return getattr(p, 'thought', False) or not (
       p.text
       or p.inline_data
diff --git a/src/google/adk/memory/vertex_ai_rag_memory_service.py b/src/google/adk/memory/vertex_ai_rag_memory_service.py
index bd6e9dc2b0..001b30b541 100644
--- a/src/google/adk/memory/vertex_ai_rag_memory_service.py
+++ b/src/google/adk/memory/vertex_ai_rag_memory_service.py
@@ -52,7 +52,7 @@ def __init__(
         or ``{rag_corpus_id}``
       similarity_top_k: The number of contexts to retrieve.
       vector_distance_threshold: Only returns contexts with vector distance
-        smaller than the threshold..
+        smaller than the threshold.
     """
     self._vertex_rag_store = types.VertexRagStore(
         rag_resources=[
diff --git a/src/google/adk/runners.py b/src/google/adk/runners.py
index b931561c4d..ddb00814ab 100644
--- a/src/google/adk/runners.py
+++ b/src/google/adk/runners.py
@@ -770,7 +770,7 @@ async def _exec_with_plugin(
     # identified by checking if the transcription event is partial. When
     # the next transcription event is not partial, it means the previous
     # transcription is finished. Then if there is any buffered function
-    # call event, we should append them after this finished(non-parital)
+    # call event, we should append them after this finished (non-partial)
     # transcription event.
     buffered_events: list[Event] = []
     is_transcribing: bool = False
@@ -789,7 +789,7 @@ async def _exec_with_plugin(
         buffered_events.append(event)
         continue
       # Note for live/bidi: for audio response, it's considered as
-      # non-paritla event(event.partial=None)
+      # non-partial event (event.partial=None)
       # event.partial=False and event.partial=None are considered as
       # non-partial event; event.partial=True is considered as partial
       # event.
@@ -937,7 +937,7 @@ async def run_live(
     **Events Yielded to Callers:**
     * **Live Model Audio Events with Inline Data:** Events containing raw
       audio `Blob` data(`inline_data`).
-    * **Live Model Audio Events with File Data:** Both input and ouput audio
+    * **Live Model Audio Events with File Data:** Both input and output audio
      data are aggregated into a audio file saved into artifacts. The
      reference to the file is saved in the event as `file_data`.
    * **Usage Metadata:** Events containing token usage.
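
Note on the eval_config.py hunk: the docstring's JSON sample is cut off at the
hunk boundary, so here is a minimal sketch of the criteria shape it describes,
with plain thresholds for the first two metrics and an `LlmAsAJudgeCriterion`
for the third. The nested field names (`threshold`, `judge_model_options`,
`judge_model`, `num_samples`) are assumptions recalled from ADK, not taken
from this diff:

    {
      "criteria": {
        "tool_trajectory_avg_score": 1.0,
        "response_match_score": 0.8,
        "final_response_match_v2": {
          "threshold": 0.8,
          "judge_model_options": {
            "judge_model": "gemini-2.5-flash",
            "num_samples": 5
          }
        }
      }
    }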
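
Note on the contents.py hunk: `_is_part_invisible` is the predicate the flow
uses to drop thought parts and payload-free parts when assembling LLM context.
A hedged usage sketch; `visible_parts` is an illustrative helper written as if
it lived next to `_is_part_invisible` in contents.py, not a real ADK function:

    from google.genai import types

    def visible_parts(content: types.Content) -> list[types.Part]:
      # Keep only the parts the LLM should see, per _is_part_invisible's rule:
      # drop thoughts, and drop parts carrying no visible payload.
      return [p for p in (content.parts or []) if not _is_part_invisible(p)]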
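
Note on the memory-service hunk: it documents the constructor arguments, so a
minimal construction sketch may help. It assumes `VertexAiRagMemoryService` is
importable from `google.adk.memory`; the project, location, and corpus ids are
placeholders:

    from google.adk.memory import VertexAiRagMemoryService

    memory_service = VertexAiRagMemoryService(
        # Either the full resource name, as below, or a bare rag_corpus_id.
        rag_corpus='projects/my-project/locations/us-central1/ragCorpora/1234',
        similarity_top_k=5,  # number of contexts to retrieve
        vector_distance_threshold=10,  # only closer contexts are returned
    )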
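
Note on the first two runners.py hunks: the comments describe a reordering
rule for live transcription — function call events that arrive while a
transcription is still partial get buffered, then flushed right after the
finished (non-partial) transcription event, where event.partial=None or False
counts as non-partial. A self-contained sketch of that rule; this Event
stand-in and its fields are illustrative, not the real adk Event class:

    from dataclasses import dataclass
    from typing import Optional

    @dataclass
    class Event:  # illustrative stand-in, not google.adk.events.Event
      partial: Optional[bool] = None  # True = partial; False or None = non-partial
      is_transcription: bool = False
      has_function_call: bool = False

    def reorder(events: list[Event]) -> list[Event]:
      out: list[Event] = []
      buffered_events: list[Event] = []
      is_transcribing = False
      for event in events:
        if event.is_transcription:
          # partial=True means this transcription is still in flight.
          is_transcribing = bool(event.partial)
          out.append(event)
          if not is_transcribing and buffered_events:
            # The transcription just finished (non-partial), so flush the
            # buffered function call events right after it.
            out.extend(buffered_events)
            buffered_events.clear()
        elif event.has_function_call and is_transcribing:
          # Hold function call events until the transcription finishes.
          buffered_events.append(event)
        else:
          out.append(event)
      out.extend(buffered_events)  # flush anything still buffered at stream end
      return out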