Build a Real-Time AI Fact Checker with Two Separate Web-Search Models
LangGraph + Streamlit
This tutorial shows you how to build a real-time AI fact-checking web app using LangGraph, Streamlit, and two independent web-connected AI models accessed via API: OpenAI's GPT-4o and Perplexity's Sonar Reasoning, a reasoning model built on DeepSeek-R1. Enter any claim, like "The Moon landing was staged," and the system coordinates a multi-agent workflow in which each model independently investigates the claim and cross-checks sources online. A final agent compares the reasoning from each model and delivers a clear verdict, reducing the risk that a single model's hallucinations go unchecked and improving accuracy.
The video gives a step-by-step walkthrough of how to create the structured graph workflow, from defining nodes and conditional paths in LangGraph to integrating language models via LangChain. It also covers the key Python libraries (Pydantic, LangChain, LangGraph, Streamlit), environment setup, and API key management.
You can adapt this code for other real-time verification tasks, such as live fact-checking or validating AI-generated content. This approach provides a powerful, lightweight framework for improving the reliability and transparency of AI systems.
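At a high level, the compiled graph runs four nodes in sequence: a planner drafts a fact-checking plan, the Perplexity and OpenAI agents each execute it with live web search, and an adjudicator compares their verdicts. If the two agents disagree, a conditional edge loops the workflow back for re-verification, up to three attempts.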
Environment
# Example environment setup using Conda
# Create a new conda environment with Python 3.10.16
conda create -n factcheck_env python=3.10.16 -y
# Activate the new environment
conda activate factcheck_env
# Install required Python packages with pinned versions
pip install streamlit==1.43.2 langgraph==0.3.18 langchain==0.3.21 langchain-openai==0.3.9 langchain-community==0.3.20 langchain-core==0.3.46 openai==1.67.0 typing-extensions==4.12.2

Full Code
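The complete script below is organized into six sections: library imports, API keys and model setup, the Pydantic model and state structure, node functions, graph construction, and the Streamlit dashboard.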
##################################
## Fact Checking Agent Workflow ##
##################################
# (Section 1: Import Libraries) ----------
import streamlit as st
from langgraph.graph import StateGraph, START, END
from langchain_openai import ChatOpenAI
from langchain_community.chat_models.perplexity import ChatPerplexity
from langchain_core.messages import HumanMessage
from pydantic import BaseModel, ValidationError
from typing import TypedDict, List, Literal
import os, json
# (Section 2: Set API Keys and Instantiate LLMs) ----------
# Either set your keys directly here or ensure they are in os.environ.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "YOUR OPENAI API KEY GOES HERE")
PERPLEXITY_API_KEY = os.getenv("PERPLEXITY_API_KEY", "YOUR PERPLEXITY SONAR API KEY GOES HERE")
planner_llm = ChatOpenAI(model_name="gpt-4o", temperature=0.2, api_key=OPENAI_API_KEY)
adjudicator_llm = ChatOpenAI(model_name="gpt-4o", temperature=0.2, api_key=OPENAI_API_KEY)
openai_agent_llm = ChatOpenAI(model_name="gpt-4o", temperature=0.2, api_key=OPENAI_API_KEY)
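# "web_search_preview" is OpenAI's built-in web search tool; binding the raw dict
# relies on langchain-openai routing the call through OpenAI's Responses API
# (an assumption about current library behavior worth verifying against the docs).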
tool = {"type": "web_search_preview"}
openai_agent_llm = openai_agent_llm.bind_tools([tool])
perplexity_agent_llm = ChatPerplexity(model="sonar-reasoning", temperature=0.2, pplx_api_key=PERPLEXITY_API_KEY)
# (Section 3: Define Pydantic Model and State Structure) ----------
class AdjudicationResult(BaseModel):
    final_verdict: Literal["Claim is True", "Claim is False", "Re-run verification"]
    evidence_summary: str = ""
    citations: dict = {}
class FactCheckState(TypedDict):
    query: str
    plan: str
    perplexity_result: str
    openai_result: str
    adjudicator_result: str
    final_verdict: str
    attempts: int
    evidence_summary: str
    messages: List[str]
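# Illustrative example of the JSON the adjudicator is prompted to return, which
# AdjudicationResult validates (the values here are made up):
# {"final_verdict": "Claim is False",
#  "evidence_summary": "Multiple independent sources confirm...",
#  "citations": {"perplexity": [], "openai": []}}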
# (Section 4: Define Node Functions) ----------
communal_prompt = (
    "When providing citations, return them in JSON format under the key 'citations'. "
    "Critically evaluate sources when coming to a final verdict. "
    "Even sources with a strong track record of accuracy, legitimacy, or authority are often biased. "
    "Evaluate True and False literally. A claim that is a metaphor or an exaggeration is not true. "
    "You must also use a counter-source to fully evaluate and understand whether a claim is true or false."
)
def plan_fact_check(state: FactCheckState) -> FactCheckState:
    prompt = f"Create a simple plan to fact-check: '{state['query']}'"
    plan = planner_llm.invoke([HumanMessage(content=prompt)]).content.strip()
    state["plan"] = plan
    state["messages"].append("Plan: " + plan)
    return state
def run_perplexity_agent(state: FactCheckState) -> FactCheckState:
    prompt = f"Using this plan, fact-check the claim:\n{state['plan']}\n{communal_prompt}"
    response = perplexity_agent_llm.invoke([HumanMessage(content=prompt)])
    result = response.content.strip()  # Expecting a string
    state["perplexity_result"] = result
    state["messages"].append("---------- Perplexity: " + result)
    return state
def run_openai_agent(state: FactCheckState) -> FactCheckState:
    prompt = f"Using this plan, fact-check the claim:\n{state['plan']}\n{communal_prompt}"
    responses = openai_agent_llm.invoke([HumanMessage(content=prompt)])
    # Defensive handling: invoke normally returns a single message, but unwrap a list just in case
    response = responses[0] if isinstance(responses, list) and responses else responses
    content = response.content
    # Tool-using responses can return content as a list of blocks; join them into one string
    result = "\n".join(str(item) for item in content) if isinstance(content, list) else content
    result = result.strip() if isinstance(result, str) else str(result)
    state["openai_result"] = result
    state["messages"].append("---------- OpenAI result: " + result)
    return state
def adjudicate(state: FactCheckState) -> FactCheckState:
    # Build prompt for adjudication based on outputs from both agents
    prompt = (
        "You are a fact-check adjudicator. Two agents provided these outputs\n"
        f"following these criteria: {communal_prompt}.\n"
        "If there is no agreement in true/true or false/false, "
        "you must send the task back to the agents to re-run verification.\n"
        "Here are the results of the Perplexity and OpenAI Agents:\n"
        f"Perplexity: {state['perplexity_result']}\n"
        f"OpenAI: {state['openai_result']}\n"
        "Return a JSON object with keys:\n"
        '"final_verdict": "Claim is True"|"Claim is False"|"Re-run verification",\n'
        '"evidence_summary": "Summary",\n'
        '"citations": {"perplexity": [], "openai": []}'
    )
    raw = adjudicator_llm.invoke([HumanMessage(content=prompt)]).content.strip()
    # Strip Markdown code fences so the JSON can be parsed directly
    raw = raw.replace("```json", "").replace("```", "")
    try:
        adjud = AdjudicationResult.model_validate_json(raw)
        verdict = adjud.final_verdict
        evidence = adjud.evidence_summary
    except ValidationError:
        # Malformed JSON from the model triggers another verification pass
        verdict, evidence = "Re-run verification", ""
    state["adjudicator_result"] = raw
    state["final_verdict"] = verdict
    state["evidence_summary"] = evidence
    state["messages"].append("---------- Adjudicator: " + raw)
    state["attempts"] += 1
    return state
def needs_recheck(state: FactCheckState) -> bool:
    return state["final_verdict"] == "Re-run verification"
# (Section 5: Build and Compile the LangGraph) ----------
graph = (
    StateGraph(FactCheckState)
    .add_node("plan_node", plan_fact_check)
    .add_node("perplexity", run_perplexity_agent)
    .add_node("openai", run_openai_agent)
    .add_node("adjudicate", adjudicate)
    .add_edge(START, "plan_node")
    .add_edge("plan_node", "perplexity")
    .add_edge("perplexity", "openai")
    .add_edge("openai", "adjudicate")
    .add_conditional_edges(
        "adjudicate",
        lambda s: "recheck" if needs_recheck(s) and s["attempts"] < 3 else "finish",
        {"recheck": "perplexity", "finish": END}
    )
    .compile()
)
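# Quick sanity check without the UI (left commented out; assumes valid API keys):
# initial = {"query": "The Eiffel Tower is in Paris", "plan": "", "perplexity_result": "",
#            "openai_result": "", "adjudicator_result": "", "final_verdict": "",
#            "attempts": 0, "evidence_summary": "", "messages": []}
# print(graph.invoke(initial)["final_verdict"])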
# (Section 6: Streamlit Dashboard) ----------
st.title("Fact-Checking AI Agent Dashboard")
claim = st.text_input("Enter your claim:")
if st.button("Submit") and claim:
    state: FactCheckState = {
        "query": claim,
        "plan": "",
        "perplexity_result": "",
        "openai_result": "",
        "adjudicator_result": "",
        "final_verdict": "",
        "attempts": 0,
        "evidence_summary": "",
        "messages": []
    }
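    # graph.invoke runs the full workflow synchronously and returns the final state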
    with st.spinner("Processing your claim..."):
        state = graph.invoke(state)
    # Display final verdict as an alert
    if state["final_verdict"] == "Claim is True":
        st.success(state["final_verdict"])
    elif state["final_verdict"] == "Claim is False":
        st.error(state["final_verdict"])
    else:
        st.info(state["final_verdict"])
    # Display evidence summary
    st.markdown("**Evidence Summary:**")
    st.write(state["evidence_summary"])
    # Display detailed log
    st.markdown("**Detailed Log:**")
    st.markdown("# Adjudicator Result")
    st.markdown(state["adjudicator_result"])
    st.markdown("# OpenAI Result")
    st.markdown(state["openai_result"])
    st.markdown("# Perplexity Result")
    st.markdown(state["perplexity_result"])

Subscribe to the Deep Charts YouTube Channel for more informative AI and Machine Learning Tutorials.


