import json
# Build the LLM prompt.
# Serialize the verified claims as pretty-printed JSON for inclusion in the
# user prompt. ensure_ascii=False keeps non-ASCII characters (expert names,
# quotes, non-English source titles) readable instead of \uXXXX escapes,
# which both the LLM and anyone debugging the prompt handle better.
# NOTE(review): assumes `related_claims` (defined earlier in the file) is a
# JSON-serializable structure — confirm; json.dumps raises TypeError otherwise.
evidence_block = json.dumps(related_claims, indent=2, ensure_ascii=False)
# System prompt: fixed analyst instructions plus the required markdown output
# format for the thesis-validation brief. The text below is sent verbatim to
# the model — do not reflow or edit it casually, as wording changes alter
# model behavior.
SYSTEM_PROMPT = """You are a senior analyst producing a thesis validation brief.
You will receive a hypothesis and a set of verified expert claims with source
attribution. Your job is to assess whether the evidence supports, contradicts,
or is insufficient to evaluate the thesis.
Rules:
- Only cite claims actually provided. Never fabricate sources.
- Distinguish between SUPPORTING evidence (directionally agrees with the thesis),
CONTRADICTING evidence (directionally disagrees), and CONTEXTUAL evidence
(relevant but neither supports nor contradicts).
- Be direct about the strength of evidence. "3 sources agree" is stronger than
"1 source agrees." Say so.
- End with a clear VERDICT: Supported, Challenged, Mixed, or Insufficient Evidence.
- Keep the brief under 400 words.
Output format (markdown):
## Thesis Validation Brief
**Thesis:** <the hypothesis being tested>
**Verdict:** <Supported | Challenged | Mixed | Insufficient Evidence>
**Confidence:** <High | Medium | Low> (based on volume and diversity of evidence)
### Supporting Evidence
<bulleted list of claims with source attribution, or "None found">
### Contradicting Evidence
<bulleted list of claims with source attribution, or "None found">
### Contextual Evidence
<bulleted list of relevant-but-neutral claims, or "None found">
### Assessment
<2-3 sentence synthesis: what does this mean for the decision at hand?>
### Recommended Next Steps
<1-2 specific actions based on the evidence gaps>
"""
# User prompt: interpolates the thesis under test and the JSON evidence block.
# NOTE(review): `thesis` must be defined earlier in the file — confirm; an
# undefined name here raises NameError the moment this line executes.
USER_PROMPT = f"""Thesis to validate:
"{thesis}"
Verified expert claims (from Gildea intelligence database):
{evidence_block}
"""
# Pass SYSTEM_PROMPT and USER_PROMPT to your LLM of choice.
# Example with the Anthropic SDK:
#
# import anthropic
# llm = anthropic.Anthropic()
# response = llm.messages.create(
#     model="claude-sonnet-4-20250514",
#     max_tokens=1024,
#     system=SYSTEM_PROMPT,
#     messages=[{"role": "user", "content": USER_PROMPT}],
# )
# brief = response.content[0].text
# Alternatively, dump both prompts so they can be pasted into any LLM interface.
for _label, _prompt in (("SYSTEM", SYSTEM_PROMPT), ("USER", USER_PROMPT)):
    print(f"=== {_label} PROMPT ===")
    print(_prompt)