import os

from dotenv import load_dotenv, find_dotenv
from fastapi import FastAPI
from langchain_community.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings

app = FastAPI()

# Load environment variables
load_dotenv(find_dotenv())
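# The variables below are read from that .env file; example contents
# (placeholder values, adjust to your setup):
#   OPENAI_API_KEY=sk-...
#   PERSIST_DIRECTORY=docs/chroma/
#   EMBEDDING_MODEL=text-embedding-3-large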

# Initialize the embeddings and Chroma vector store from environment variables.
# PERSIST_DIRECTORY must be set; EMBEDDING_MODEL falls back to a default.
embedding_model = os.getenv("EMBEDDING_MODEL", "text-embedding-3-large")
persist_directory = os.environ["PERSIST_DIRECTORY"]
embeddings = OpenAIEmbeddings(model=embedding_model)
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embeddings)
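
# Note: persist_directory is expected to already contain a Chroma collection
# built with the same embedding model. A minimal ingestion sketch, assuming
# `splits` is a list of Documents (hypothetical name):
#   vectordb = Chroma.from_documents(documents=splits, embedding=embeddings,
#                                    persist_directory=persist_directory)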

# Debug: Print environment variables
print("Persist Directory:", persist_directory)
print("Embedding Model:", embedding_model)

# Debugging endpoint: embeds the query and runs a similarity search
# against the persisted Chroma collection.
@app.get("/debug")
async def debug_endpoint(que: str):
    try:
        # Embed the query with the same model that backs the vector store,
        # then fetch the nearest documents (Chroma defaults to k=4).
        embedding_vector = embeddings.embed_query(que)
        docs = vectordb.similarity_search_by_vector(embedding_vector)
        print("Context Results:", docs)

        return {
            "message": "Debugging successful.",
            "results": [doc.page_content for doc in docs],
        }

    except Exception as e:
        return {"error": str(e)}

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="localhost", port=8000)
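
# Example request once the server is running (hypothetical query):
#   curl "http://localhost:8000/debug?que=What%20is%20in%20the%20docs"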
