# Documentation

## Search Types

Types for AI-powered search operations.
### ImageSearchAgentResult

Complete result from AI-powered image search.

```python
@dataclass(frozen=True)
class ImageSearchAgentResult:
    success: bool                                     # Whether search succeeded
    results: list[ImageSearchResultItem]              # Full result objects
    count: int                                        # Total results found
    result_ids: list[str]                             # Flat list of image IDs
    summary: str                                      # Human-readable summary (refs resolved)
    summary_raw: str                                  # Original with [[ref:X]] patterns
    result_refs: dict[str, ResultRefData]             # For interactive UI building
    search_mode: str                                  # Search mode used (e.g. "hybrid")
    execution_time_ms: int                            # Total execution time
    iterations: int                                   # Reasoning iterations performed
    search_strategy: Optional[dict[str, Any]] = None  # Strategy metadata
    token_usage: Optional[dict[str, int]] = None      # LLM token usage

    @classmethod
    def from_api_response(cls, data: dict) -> "ImageSearchAgentResult": ...

    def as_collection(self) -> FileCollection:
        """Convert results to a FileCollection for agent data flow."""
        ...
```

### DocumentSearchAgentResult
Complete result from AI-powered document search.

```python
@dataclass(frozen=True)
class DocumentSearchAgentResult:
    success: bool                                     # Whether search succeeded
    results: list[DocumentChunkResultItem]            # Full chunk objects
    count: int                                        # Total chunks found
    chunk_ids: list[str]                              # Flat list of chunk IDs
    document_ids: list[str]                           # Unique document IDs in results
    summary: str                                      # Human-readable summary
    summary_raw: str                                  # Original with [[ref:X]] patterns
    result_refs: dict[str, ResultRefData]             # For interactive UI building
    search_mode: str                                  # Search mode used
    execution_time_ms: int                            # Total execution time
    iterations: int                                   # Reasoning iterations performed
    search_strategy: Optional[dict[str, Any]] = None  # Strategy metadata
    token_usage: Optional[dict[str, int]] = None      # LLM token usage

    @classmethod
    def from_api_response(cls, data: dict) -> "DocumentSearchAgentResult": ...

    def as_collection(self, by: str = "document") -> FileCollection:
        """Convert results to a FileCollection.

        by='document' uses document IDs, by='chunk' uses chunk IDs.
        """
        ...
```

### ImageSearchResultItem
Individual image result from agent search.

```python
@dataclass(frozen=True)
class ImageSearchResultItem:
    image_id: str                                        # Image UUID
    score: float                                         # Relevance score (0-1)
    filename: Optional[str] = None                       # Original filename
    title: Optional[str] = None                          # Image title
    description: Optional[str] = None                    # AI-generated description
    folder_id: Optional[str] = None                      # Parent folder ID
    thumbnail_url: Optional[str] = None                  # Thumbnail URL
    features: Optional[list[dict[str, Any]]] = None      # Detected objects/features

    @classmethod
    def from_api_response(cls, data: dict) -> "ImageSearchResultItem": ...
```

### DocumentChunkResultItem
Individual document chunk result from agent search.

```python
@dataclass(frozen=True)
class DocumentChunkResultItem:
    chunk_id: str                                 # Chunk UUID
    document_id: str                              # Parent document UUID
    document_filename: str                        # Document filename
    text: str                                     # Chunk text content
    score: float                                  # Relevance score (0-1)
    page_numbers: Optional[list[int]] = None      # Page numbers for this chunk
    chunk_index: Optional[int] = None             # Position in document

    @classmethod
    def from_api_response(cls, data: dict) -> "DocumentChunkResultItem": ...
```

### ResultRefData
Reference data for building interactive UI elements from `[[ref:X]]` patterns.

```python
@dataclass(frozen=True)
class ResultRefData:
    count: int            # Number of items
    ids: list[str]        # Item IDs (images, chunks, or documents)
    image_ids: list[str]  # Backward compat: image IDs when id_type='image'
    id_type: str          # "image", "document", or "chunk"
    label: str            # Human-readable label (e.g., "15 images")

    @classmethod
    def from_api_response(cls, data: dict) -> "ResultRefData": ...
```

### MatchedSceneItem
Individual scene match within a video result — includes start/end timestamps so you can deep-link into the moment.

```python
@dataclass(frozen=True)
class MatchedSceneItem:
    scene_id: str
    scene_index: int                      # Zero-based scene index within the video
    start_time: float                     # Seconds from start
    end_time: float                       # Seconds from start
    time_range_formatted: str             # e.g. "00:07:03-00:07:11"
    description: str                      # AI-generated scene description
    score: float                          # Relevance score (0-1)
    tags: Optional[list[str]] = None
    thumbnail_url: Optional[str] = None

    @classmethod
    def from_api_response(cls, data: dict) -> "MatchedSceneItem": ...
```

### VideoSearchResultItem
Individual video result with matched scenes.

```python
@dataclass(frozen=True)
class VideoSearchResultItem:
    video_id: str
    video_filename: str
    score: float                                          # Relevance score (0-1)
    video_description: str = ""
    duration_seconds: Optional[float] = None
    resolution: Optional[str] = None                      # e.g. "1920x1080"
    thumbnail_url: Optional[str] = None
    matched_scenes: Optional[list[MatchedSceneItem]] = None

    @classmethod
    def from_api_response(cls, data: dict) -> "VideoSearchResultItem": ...
```

### VideoSearchAgentResult
Complete result from AI-powered video search.

```python
@dataclass(frozen=True)
class VideoSearchAgentResult:
    success: bool
    results: list[VideoSearchResultItem]              # Videos with matched_scenes
    count: int                                        # Total videos found
    video_ids: list[str]                              # Flat list of video IDs
    summary: str                                      # Human-readable summary
    summary_raw: str                                  # Original with [[ref:X]] patterns
    result_refs: dict[str, ResultRefData]             # For interactive UI building
    search_mode: str                                  # Search mode used
    execution_time_ms: int
    iterations: int
    search_strategy: Optional[dict[str, Any]] = None
    token_usage: Optional[dict[str, int]] = None

    @classmethod
    def from_api_response(cls, data: dict) -> "VideoSearchAgentResult": ...

    def as_collection(self) -> FileCollection:
        """Convert results to a FileCollection of video refs."""
        ...
```
