fix approx token count call: local `tokens` variable shadowed the imported `tokens` module

This commit is contained in:
EC2 Default User 2025-07-24 17:57:39 +00:00
parent 86c96da1b9
commit 907375eee5
3 changed files with 5 additions and 10 deletions

View File

@ -180,6 +180,7 @@ async def observe(
session_id: UUID to group observations from same conversation
agent_model: AI model making observations (for quality tracking)
"""
logger.info("MCP: Observing")
tasks = [
(
observation,
@ -237,6 +238,7 @@ async def search_observations(
Returns: List with content, tags, created_at, metadata
Results sorted by relevance to your query.
"""
logger.info("MCP: Searching observations for %s", query)
semantic_text = observation.generate_semantic_text(
subject=subject or "",
observation_type="".join(observation_types or []),
@ -297,6 +299,7 @@ async def create_note(
confidences: Dict of scores (0.0-1.0), e.g. {"observation_accuracy": 0.9}
tags: Organization tags for filtering and discovery
"""
logger.info("MCP: creating note: %s", subject)
if filename:
path = pathlib.Path(filename)
if not path.is_absolute():

View File

@ -108,10 +108,3 @@ async def get_authenticated_user() -> dict:
"client_id": access_token.client_id,
"user": user_info,
}
@mcp.tool()
async def send_response(response: str) -> dict:
    """Send a response back to the user.

    Args:
        response: The response text to deliver to the user.

    Returns:
        A dict echoing the text under the "response" key.
    """
    # Lazy %-style args: the message string is only built if INFO is enabled,
    # and a malformed response can't break formatting the way an f-string could.
    logger.info("Sending response: %s", response)
    return {"response": response}

View File

@ -28,7 +28,7 @@ from sqlalchemy.dialects.postgresql import BYTEA
from sqlalchemy.orm import Session, relationship
from sqlalchemy.types import Numeric
from memory.common import settings
from memory.common import settings, tokens
import memory.common.extract as extract
import memory.common.collections as collections
import memory.common.chunker as chunker
@ -125,8 +125,7 @@ def chunk_mixed(content: str, image_paths: Sequence[str]) -> list[extract.DataCh
)
chunks: list[extract.DataChunk] = [full_text]
tokens = chunker.approx_token_count(content)
if tokens > chunker.DEFAULT_CHUNK_TOKENS * 2:
if tokens.approx_token_count(content) > chunker.DEFAULT_CHUNK_TOKENS * 2:
chunks += [
extract.DataChunk(data=add_pics(c, images), metadata={"tags": tags})
for c in chunker.chunk_text(content)