diff --git a/src/memory/api/MCP/memory.py b/src/memory/api/MCP/memory.py
index 0eff43b..bcda67f 100644
--- a/src/memory/api/MCP/memory.py
+++ b/src/memory/api/MCP/memory.py
@@ -180,6 +180,7 @@ async def observe(
         session_id: UUID to group observations from same conversation
         agent_model: AI model making observations (for quality tracking)
     """
+    logger.info("MCP: Observing")
     tasks = [
         (
             observation,
@@ -237,6 +238,7 @@ async def search_observations(
     Returns: List with content, tags, created_at, metadata
     Results sorted by relevance to your query.
     """
+    logger.info("MCP: Searching observations for %s", query)
     semantic_text = observation.generate_semantic_text(
         subject=subject or "",
         observation_type="".join(observation_types or []),
@@ -297,6 +299,7 @@ async def create_note(
         confidences: Dict of scores (0.0-1.0), e.g. {"observation_accuracy": 0.9}
         tags: Organization tags for filtering and discovery
     """
+    logger.info("MCP: creating note: %s", subject)
     if filename:
         path = pathlib.Path(filename)
         if not path.is_absolute():
diff --git a/src/memory/api/MCP/tools.py b/src/memory/api/MCP/tools.py
index c5bcf2a..d0d8b82 100644
--- a/src/memory/api/MCP/tools.py
+++ b/src/memory/api/MCP/tools.py
@@ -108,10 +108,3 @@ async def get_authenticated_user() -> dict:
         "client_id": access_token.client_id,
         "user": user_info,
     }
-
-
-@mcp.tool()
-async def send_response(response: str) -> dict:
-    """Send a response to the user."""
-    logger.info(f"Sending response: {response}")
-    return {"response": response}
diff --git a/src/memory/common/db/models/source_item.py b/src/memory/common/db/models/source_item.py
index 42d2280..a2f93c2 100644
--- a/src/memory/common/db/models/source_item.py
+++ b/src/memory/common/db/models/source_item.py
@@ -28,7 +28,7 @@ from sqlalchemy.dialects.postgresql import BYTEA
 from sqlalchemy.orm import Session, relationship
 from sqlalchemy.types import Numeric
 
-from memory.common import settings
+from memory.common import settings, tokens
 import memory.common.extract as extract
 import memory.common.collections as collections
 import memory.common.chunker as chunker
@@ -125,8 +125,7 @@ def chunk_mixed(content: str, image_paths: Sequence[str]) -> list[extract.DataChunk]:
     )
 
     chunks: list[extract.DataChunk] = [full_text]
-    tokens = chunker.approx_token_count(content)
-    if tokens > chunker.DEFAULT_CHUNK_TOKENS * 2:
+    if tokens.approx_token_count(content) > chunker.DEFAULT_CHUNK_TOKENS * 2:
         chunks += [
             extract.DataChunk(data=add_pics(c, images), metadata={"tags": tags})
             for c in chunker.chunk_text(content)