Run 2026-04-29
Started Apr 29, 2026, 00:41 · ended Apr 29, 2026, 00:56 · ran for 15m 7s.
Summary
+3 events, ~15 updated, 2 filtered, +3 sources, -1 sources, 5 errors
Counters
38 sources scraped · 3 failed
+3 events added · ~15 updated · 0 unchanged · 2 dropped (filtered) · 0 dropped (duplicate)
+3 new sources found · 3 promoted · 1 deprecated
Per-source breakdown (38)
What came from where. Use this to spot sources that contribute nothing (candidates for deprecation) or sources that consistently error; a triage sketch follows the list.
ai-evaluation-programme · ok · ~1
ai-safety-awareness-oakland · ok · +1
aigov-workshop · ok
aisafety-london · error · 2 errors
alignment-forum · rate-limited · 1 error
anthropic-alignment-blog · ok · ~2
anthropic-events · no-events
apart-research · ok · ~1
apollo-research · no-events
arena · ok · ~1
berkeley-rdi · ok · +1
bluedot · no-events
cais · ok
cbai · no-events
constellation-astra · ok · ~1
controlconf · ok
deepmind-events · no-events
ea-forum-events · no-events
far-ai · ok · ~1
foresight-institute · ok · +1 ~1
govai · no-events
icml-safety-workshops · ok
iliad-conference · ok · ~1
lesswrong-events · no-events
luma-ai-safety · no-events
luma-alignment · no-events
mats · ok · ~1
mechinterp-workshop · ok · ~1
meetup-ai-safety · error · 2 errors
metr · no-events
ml-safety-newsletter · no-events
neurips-safety-workshops · ok
openai-safety · ok · ~1
pluralistic-alignment-workshop · ok · ~1
realign-workshop · ok
redwood-research · no-events
tais-conference · ok · ~1
unidir · ok · ~1
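The breakdown can be triaged mechanically from the raw JSON log. A minimal Python sketch, assuming the log below is saved to a file named run-2026-04-29.json (the filename is an assumption, not something the agent produces):

import json

# Load the raw run log (filename is an assumption).
with open("run-2026-04-29.json") as f:
    run = json.load(f)

for src in run["per_source"]:
    contributed = src["events_added"] + src["events_updated"] + src["events_unchanged"]
    if src["errors"] > 0:
        print(f"{src['source_id']}: {src['errors']} error(s), status={src['status']}")
    elif contributed == 0:
        print(f"{src['source_id']}: contributed nothing (deprecation candidate)")

Against this run it would flag the three failing sources plus every source that neither added nor updated an event.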
Discovery queries (14)
Search queries the agent ran while looking for new sources this week.
- AI safety fellowship 2026
- AI alignment workshop 2026
- mechanistic interpretability conference 2026
- AI safety mixer San Francisco 2026
- AI safety mixer London 2026
- AI alignment reading group 2026
- AI governance conference 2026
- MATS scholars 2026
- AI safety hackathon 2026
- site:lu.ma AI safety
- Anthropic Fellows Program 2026
- London Alignment Workshop FAR.AI 2026
- AI safety meetup Berkeley 2026
- "AI alignment salon" 2026
Filter decisions (2)
Events the agent considered and deliberately dropped (out-of-scope or generic AI). These are distinct from the errors below.
- AIGOV-26 workshop was January 26, 2026 (already past). No future iteration announced yet.
- UNIDIR Cyber Stability Conference 2026 is primarily about cybersecurity, not AI safety/alignment/governance specifically.
Errors (5)
Things the agent flagged for human review. Most are harmless ("page unparseable, skipped"); persistent errors against the same source suggest deprecation, and a cross-run check is sketched below the list.
- fetch · Alignment Forum Events · 429 response, same as previous run
- fetch · Meetup.com — AI safety topic · 500 internal server error from Meetup.com
- fetch · AI Safety London · DNS resolution failed - domain may be down
- fetch · AI Safety London · DNS lookup failed for aisafety.london - domain may be expired or moved
- fetch · Meetup.com — AI safety topic · Meetup.com returned 500 internal server error
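A single failed fetch is usually noise; the same source failing run after run is signal. A sketch of that cross-run check, assuming past logs are kept as one JSON file per run in a runs/ directory (the layout and threshold are assumptions):

import json
from collections import Counter
from pathlib import Path

# Tally errors per source across every saved run log.
errors_per_source = Counter()
for path in sorted(Path("runs").glob("*.json")):
    run = json.loads(path.read_text())
    for err in run["errors"]:
        errors_per_source[err["source_id"]] += 1

# Sources that keep failing across runs are deprecation candidates.
for source_id, count in errors_per_source.most_common():
    if count >= 3:  # arbitrary cutoff; tune to taste
        print(f"{source_id}: {count} errors across runs, consider deprecating")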
Raw run log
Full JSON
{
"started_at": "2026-04-29T00:41:15.195Z",
"ended_at": "2026-04-29T00:56:21.953Z",
"duration_ms": 906758,
"sources_scraped": 38,
"sources_failed": 3,
"events_added": 3,
"events_updated": 15,
"events_unchanged": 0,
"events_dropped_filtered": 2,
"events_dropped_duplicate": 0,
"new_sources_found": 3,
"new_sources_promoted": 3,
"deprecated_sources": 1,
"errors": [
{
"source_id": "alignment-forum",
"stage": "fetch",
"message": "429 response, same as previous run"
},
{
"source_id": "meetup-ai-safety",
"stage": "fetch",
"message": "500 internal server error from Meetup.com"
},
{
"source_id": "aisafety-london",
"stage": "fetch",
"message": "DNS resolution failed - domain may be down"
},
{
"source_id": "aisafety-london",
"url": "https://aisafety.london/",
"stage": "fetch",
"message": "DNS lookup failed for aisafety.london - domain may be expired or moved"
},
{
"source_id": "meetup-ai-safety",
"url": "https://www.meetup.com/topics/artificial-intelligence-safety/",
"stage": "fetch",
"message": "Meetup.com returned 500 internal server error"
}
],
"discovery_queries": [
"AI safety fellowship 2026",
"AI alignment workshop 2026",
"mechanistic interpretability conference 2026",
"AI safety mixer San Francisco 2026",
"AI safety mixer London 2026",
"AI alignment reading group 2026",
"AI governance conference 2026",
"MATS scholars 2026",
"AI safety hackathon 2026",
"site:lu.ma AI safety",
"Anthropic Fellows Program 2026",
"London Alignment Workshop FAR.AI 2026",
"AI safety meetup Berkeley 2026",
"\"AI alignment salon\" 2026"
],
"summary": "+3 events, ~15 updated, 2 filtered, +3 sources, -1 sources, 5 errors",
"per_source": [
{
"source_id": "ai-evaluation-programme",
"status": "ok",
"events_added": 0,
"events_updated": 1,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "ai-safety-awareness-oakland",
"status": "ok",
"events_added": 1,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "aigov-workshop",
"status": "ok",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "aisafety-london",
"status": "error",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 2
},
{
"source_id": "alignment-forum",
"status": "rate-limited",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 1
},
{
"source_id": "anthropic-alignment-blog",
"status": "ok",
"events_added": 0,
"events_updated": 2,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "anthropic-events",
"status": "no-events",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "apart-research",
"status": "ok",
"events_added": 0,
"events_updated": 1,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "apollo-research",
"status": "no-events",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "arena",
"status": "ok",
"events_added": 0,
"events_updated": 1,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "berkeley-rdi",
"status": "ok",
"events_added": 1,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "bluedot",
"status": "no-events",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "cais",
"status": "ok",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "cbai",
"status": "no-events",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "constellation-astra",
"status": "ok",
"events_added": 0,
"events_updated": 1,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "controlconf",
"status": "ok",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "deepmind-events",
"status": "no-events",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "ea-forum-events",
"status": "no-events",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "far-ai",
"status": "ok",
"events_added": 0,
"events_updated": 1,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "foresight-institute",
"status": "ok",
"events_added": 1,
"events_updated": 1,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "govai",
"status": "no-events",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "icml-safety-workshops",
"status": "ok",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "iliad-conference",
"status": "ok",
"events_added": 0,
"events_updated": 1,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "lesswrong-events",
"status": "no-events",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "luma-ai-safety",
"status": "no-events",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "luma-alignment",
"status": "no-events",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "mats",
"status": "ok",
"events_added": 0,
"events_updated": 1,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "mechinterp-workshop",
"status": "ok",
"events_added": 0,
"events_updated": 1,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "meetup-ai-safety",
"status": "error",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 2
},
{
"source_id": "metr",
"status": "no-events",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "ml-safety-newsletter",
"status": "no-events",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "neurips-safety-workshops",
"status": "ok",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "openai-safety",
"status": "ok",
"events_added": 0,
"events_updated": 1,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "pluralistic-alignment-workshop",
"status": "ok",
"events_added": 0,
"events_updated": 1,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "realign-workshop",
"status": "ok",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "redwood-research",
"status": "no-events",
"events_added": 0,
"events_updated": 0,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "tais-conference",
"status": "ok",
"events_added": 0,
"events_updated": 1,
"events_unchanged": 0,
"errors": 0
},
{
"source_id": "unidir",
"status": "ok",
"events_added": 0,
"events_updated": 1,
"events_unchanged": 0,
"errors": 0
}
],
"filter_decisions": [
"AIGOV-26 workshop was January 26, 2026 (already past). No future iteration announced yet.",
"UNIDIR Cyber Stability Conference 2026 is primarily about cybersecurity, not AI safety/alignment/governance specifically"
]
}
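The top-level counters should agree with the per_source and errors arrays. A quick consistency check, a sketch assuming the same run-2026-04-29.json filename as above:

import json

with open("run-2026-04-29.json") as f:
    run = json.load(f)

per_source = run["per_source"]
# Each top-level counter should be derivable from the per-source rows.
assert run["sources_scraped"] == len(per_source)
assert run["events_added"] == sum(s["events_added"] for s in per_source)
assert run["events_updated"] == sum(s["events_updated"] for s in per_source)
assert len(run["errors"]) == sum(s["errors"] for s in per_source)
assert run["sources_failed"] == sum(1 for s in per_source if s["errors"] > 0)
print("run log is internally consistent")

All five assertions hold for this run: 38 sources scraped, 3 events added, 15 updated, and 5 errors spread across 3 failed sources.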