Keep the last `recent_n` visible substantive envelopes verbatim; fold everything older into a single `CompactionSummary` at the head of the projection.
Bounds prompt size at any turn count — the projection is at most recent_n + 1 events regardless of WAL length. CompactionSummary is recognised by autogen/beta/policies/conversation.py so it renders correctly in the LLM-facing message stream.
The summary is a static stat-style line ("Earlier in this session: N messages from a, b.") — no LLM call.
Source code in autogen/beta/network/views/builtin.py
def __init__(self, recent_n: int) -> None:
    """Configure the windowed-summary view.

    Args:
        recent_n: How many of the most recent visible events to keep
            verbatim in the projection; must be at least 1.

    Raises:
        ValueError: If ``recent_n`` is less than 1.
    """
    if recent_n >= 1:
        self._recent_n = recent_n
    else:
        raise ValueError(f"recent_n must be >= 1, got {recent_n}")
`name` — class attribute (also set on instances):
name = 'windowed_summary'
`project` — async method:
project(wal, *, participant_id, session, render_envelope)
Source code in autogen/beta/network/views/builtin.py
async def project(
    self,
    wal: list[Envelope],
    *,
    participant_id: str,
    session: SessionMetadata,
    render_envelope: EnvelopeRenderer,
) -> list[BaseEvent]:
    """Project the WAL for one participant, compacting older history.

    Collects every envelope that is both visible to ``participant_id``
    and renderable, then either returns them all as events (when they
    fit inside the window) or folds everything older than the last
    ``recent_n`` entries into a single leading ``CompactionSummary``.

    Args:
        wal: Full write-ahead log of envelopes, oldest first.
        participant_id: Identity the projection is built for.
        session: Session metadata (unused here; part of the view contract).
        render_envelope: Callable turning an envelope into text, or
            ``None`` when the envelope should be skipped.

    Returns:
        At most ``recent_n + 1`` events: an optional summary followed
        by the verbatim recent events.
    """
    rendered: list[tuple[Envelope, str]] = []
    for env in wal:
        # Skip envelopes the participant cannot see or that render to nothing.
        if not visible_to(env, participant_id):
            continue
        text = render_envelope(env)
        if text is not None:
            rendered.append((env, text))

    # Everything fits in the window: no compaction needed.
    if len(rendered) <= self._recent_n:
        return [_to_event(e, t, participant_id) for e, t in rendered]

    split = len(rendered) - self._recent_n
    older_envelopes = [e for e, _ in rendered[:split]]
    events: list[BaseEvent] = [
        CompactionSummary(
            summary=_summarize_older(older_envelopes),
            event_count=len(older_envelopes),
        )
    ]
    events.extend(_to_event(e, t, participant_id) for e, t in rendered[split:])
    return events