import json
# Build a multi-angle picture of the topic: arguments, facts, recency, players.
topic = "open source AI model licensing"

# Angles 1 & 2 share everything but the unit type: analytical arguments
# (thesis sentences) vs. corroborated facts (analysis claims).
theses, claims = (
    client.search(topic, unit_type=unit, limit=10)
    for unit in ("thesis_sentence", "analysis_claim")
)
# Angle 3: recency-boosted — surface what just happened.
recent = client.search(topic, recency_boost=0.8, limit=10)
# Angle 4: entity-scoped — what is a key player (Meta) doing?
meta_view = client.search("open source licensing strategy", entity="Meta", limit=5)
# Compile all findings into one JSON-serializable structure for the LLM.
def _hit_summary(hit, *, with_signal=False, with_date=False, entity_scope=None):
    """Return the common text/source summary for one search hit.

    Args:
        hit: one element of a search response's ``data`` list; assumed to
            carry ``unit.text`` and ``citation.registrable_domain`` (and,
            when requested, ``citation.signal_title`` / ``published_at``).
        with_signal: include the originating signal title.
        with_date: include a YYYY-MM-DD date derived from ``published_at``.
        entity_scope: when given, tag the summary with this entity name.
    """
    citation = hit["citation"]
    summary = {
        "text": hit["unit"]["text"],
        "source": citation["registrable_domain"],
    }
    if with_signal:
        summary["signal"] = citation["signal_title"]
    if with_date:
        # `or ""` guards the case where published_at is present but None;
        # dict.get's default only applies when the key is missing entirely.
        summary["date"] = (citation.get("published_at") or "")[:10]
    if entity_scope is not None:
        summary["entity_scope"] = entity_scope
    return summary


research_data = {
    "topic": topic,
    "expert_theses": [
        _hit_summary(h, with_signal=True) for h in theses["data"]
    ],
    "verified_claims": [
        _hit_summary(h, with_signal=True) for h in claims["data"]
    ],
    "recent_developments": [
        _hit_summary(h, with_signal=True, with_date=True) for h in recent["data"]
    ],
    "key_player_intelligence": [
        _hit_summary(h, entity_scope="Meta") for h in meta_view["data"]
    ],
    # Count distinct source domains across all four searches.
    "total_sources": len({
        h["citation"]["registrable_domain"]
        for results in (theses, claims, recent, meta_view)
        for h in results["data"]
    }),
}
research_json = json.dumps(research_data, indent=2)
# LLM instructions: analyst role, handling rules (theses = attributed opinions,
# claims = corroborated facts), and the exact markdown output skeleton.
# Kept verbatim as a module-level constant so the prompt is easy to audit.
SYSTEM_PROMPT = """You are a senior research analyst producing a structured dossier
on an AI market topic. You will receive findings from multiple search angles across
Gildea's verified intelligence database: expert theses, verified claims, recent
developments, and entity-specific intelligence.
Rules:
- This is for someone who needs to get smart on a topic in 5 minutes — not an
academic literature review.
- Structure the dossier around WHAT IS KNOWN (established facts), WHAT IS DEBATED
(competing expert views), and WHAT IS UNKNOWN (gaps in coverage).
- Expert theses represent analytical arguments. Claims represent verified facts.
Treat them differently — theses are opinions (attributed), claims are facts
(corroborated).
- Always attribute: "(source: domain.com)" after each finding.
- Identify CONTRADICTIONS between sources explicitly. Disagreement among experts
is one of the most valuable things you can surface.
- End with "Knowledge Gaps" — what questions remain unanswered by the available
evidence? This tells the reader where to focus further research.
- Keep it under 600 words.
Output format (markdown):
## Research Dossier: [Topic]
**Sources reviewed:** [N] distinct expert sources
**Coverage period:** [date range of findings]
### Executive Summary
<3-4 sentences: what a decision-maker needs to know about this topic right now>
### What Is Known (Verified Facts)
<Bulleted list of verified claims with source attribution. Group by sub-topic
if more than 5 claims.>
### What Is Debated (Competing Expert Views)
<Summarize the 2-3 main analytical positions experts hold on this topic, with
attribution. Highlight contradictions.>
### Recent Developments
<Bulleted list of time-sensitive findings from the recency-boosted search>
### Key Player Activity
<What specific entities are doing, based on entity-scoped search results>
### Knowledge Gaps
<Bulleted list of 2-4 questions that the available evidence does NOT answer>
### Recommended Next Steps
<2-3 specific follow-up actions: additional searches, entities to monitor,
signals to read in full>
"""
# The user turn carries only the data payload; all behavioral rules live in
# the system prompt. Explicit concatenation makes the exact bytes obvious.
USER_PROMPT = (
    "Produce a research dossier from these multi-angle findings:\n"
    + research_json
    + "\n"
)
# Pass SYSTEM_PROMPT and USER_PROMPT to your LLM of choice.
# Example with Anthropic SDK:
#
# import anthropic
# llm = anthropic.Anthropic()
# response = llm.messages.create(
# model="claude-sonnet-4-20250514",
# max_tokens=2048,
# system=SYSTEM_PROMPT,
# messages=[{"role": "user", "content": USER_PROMPT}],
# )
# dossier = response.content[0].text
# Emit both prompts, each under a banner, so they can be copied into any client.
for banner, prompt in (
    ("=== SYSTEM PROMPT ===", SYSTEM_PROMPT),
    ("=== USER PROMPT ===", USER_PROMPT),
):
    print(banner)
    print(prompt)