from logging import Logger
from phi.llm.ollama import Ollama
from phi.embedder.ollama import OllamaEmbedder
from phi.vectordb.distance import Distance
from phi.agent import Agent
from phi.knowledge.pdf import PDFUrlKnowledgeBase
from phi.vectordb.pgvector import PgVector, SearchType
# --- Constants --------------------------------------------------------------
DB_URL = "tmp/lancedb"  # NOTE(review): unused in this script (PgVector is used below) — confirm before removing
PDF_DIR = "tmp/pdfs"    # NOTE(review): unused in this script — confirm before removing
MODEL_NAME = "qwen2.5:14b"  # fixed typo: was "qqwen2.5:14b"

# --- Ollama LLM and embedder ------------------------------------------------
ollama = Ollama(model=MODEL_NAME)

# The embedder's declared dimension must match the vectors the model actually
# returns, because PgVector creates the table column with that dimension.
# The observed error "expected 4096 dimensions, not 768" means the table was
# created with OllamaEmbedder's default dimension (4096) while the model
# returned 768-dim vectors. Declare the real dimension explicitly, and drop /
# recreate the table after changing it.
# NOTE(review): 768 matches the observed embedding size from the traceback —
# TODO confirm this is correct for the chosen Ollama model.
embedder = OllamaEmbedder(model=MODEL_NAME, dimensions=768)

db_url = "postgresql+psycopg://test:test@localhost:5432/test"

knowledge_base = PDFUrlKnowledgeBase(
    # Read the PDF from this URL.
    urls=["https://phi-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf"],
    # Store embeddings in the test.recipes table.
    vector_db=PgVector(
        schema="test",
        table_name="recipes",
        db_url=db_url,
        search_type=SearchType.hybrid,
        embedder=embedder,
        distance=Distance.cosine,
    ),
)

# Load the knowledge base; comment this out after the first run.
knowledge_base.load(upsert=True)

agent = Agent(
    model=Ollama(id="qwen2.5:3b"),
    knowledge=knowledge_base,
    # Enable RAG by adding references from AgentKnowledge to the user prompt.
    enable_rag=True,
    # Set to False because Agents default to search_knowledge=True.
    search_knowledge=False,
    markdown=True,
    debug_mode=True,
)
# --- Observed runtime error (kept for reference) ----------------------------
# sqlalchemy.exc.StatementError: (builtins.ValueError) expected 4096 dimensions, not 768
# Cause: the pgvector table column was created with a different embedding
# dimension (4096, the OllamaEmbedder default) than the embedder actually
# produces (768). Declare the embedder's true dimension explicitly via
# OllamaEmbedder(..., dimensions=...) and drop/recreate the table.