fix: improve telemetry accuracy with modelUsage data

- Extract token counts from modelUsage (per-model stats) instead of the
  basic usage object, for accurate values
- Add contextWindow from modelUsage to calculate proper context %
- Show "X% used" when > 70% free, "X% left" when running low
- Color coding: green (ok), yellow (<30% left), red (<15%), pulsing (<5%)
- Fix totalTokens undefined error (renamed to contextUsed)
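
For context, a minimal sketch of the percentage/color rule described above, assuming the
display component receives contextUsed and contextWindow from sessionStats; the function
name and return shape are illustrative, not the actual component code in this commit:

```ts
// Minimal sketch of the display rule; names and return shape are
// illustrative, the real component code is not shown in this diff.
type ContextColor = 'green' | 'yellow' | 'red' | 'red-pulsing';

function describeContext(
  contextUsed: number,
  contextWindow: number,
): { label: string; color: ContextColor } {
  const usedPct = Math.round((contextUsed / contextWindow) * 100);
  const leftPct = 100 - usedPct;

  // More than 70% of the window still free: report usage;
  // otherwise switch to reporting what is left.
  const label = leftPct > 70 ? `${usedPct}% used` : `${leftPct}% left`;

  // Thresholds from the bullets above: yellow under 30% left,
  // red under 15%, pulsing red under 5%.
  let color: ContextColor = 'green';
  if (leftPct < 5) color = 'red-pulsing';
  else if (leftPct < 15) color = 'red';
  else if (leftPct < 30) color = 'yellow';

  return { label, color };
}

// Example: 150k used of a 200k window -> "25% left", yellow.
```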

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2025-12-18 07:29:29 +01:00
parent ea7ea9c5f8
commit a91ba61dd8
2 changed files with 39 additions and 17 deletions

@@ -32,6 +32,7 @@ export function useClaudeSession() {
     cacheReadTokens: 0,
     cacheCreationTokens: 0,
     numTurns: 0,
+    contextWindow: 200000, // Default, updated from modelUsage
     isCompacting: false,
   });
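
Reconstructed from the fields touched across both hunks, the sessionStats state that this
initializer fills in has roughly the shape below; this is inferred from the diff, not the
repository's actual type declaration:

```ts
// Inferred from the fields used in this commit; the real interface
// may declare more fields or different types.
interface SessionStats {
  totalCost: number;
  inputTokens: number;
  outputTokens: number;
  cacheReadTokens: number;
  cacheCreationTokens: number;
  numTurns: number;
  contextWindow: number; // added in this commit, defaults to 200000
  isCompacting: boolean;
}
```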
@@ -361,14 +362,29 @@ export function useClaudeSession() {
       setIsProcessing(false);
       // Update session stats from result event
-      if (event.usage || event.total_cost_usd !== undefined) {
+      // Claude sends detailed modelUsage with per-model stats including contextWindow
+      // usage contains: input_tokens (new), cache_read_input_tokens, cache_creation_input_tokens, output_tokens
+      if (event.usage || event.modelUsage || event.total_cost_usd !== undefined) {
+        // Get the primary model's usage (usually claude-opus or claude-sonnet)
+        const modelUsage = event.modelUsage || {};
+        const primaryModel = Object.keys(modelUsage).find(k => k.includes('opus') || k.includes('sonnet'))
+          || Object.keys(modelUsage)[0];
+        const primaryUsage = primaryModel ? modelUsage[primaryModel] : null;
+        // Calculate effective input tokens (what's actually in context)
+        // This is: new input + cache read (cache creation doesn't count towards context limit)
+        const effectiveInput = (event.usage?.input_tokens || 0) +
+          (event.usage?.cache_read_input_tokens || 0);
         setSessionStats(prev => ({
-          totalCost: event.total_cost_usd || prev.totalCost,
-          inputTokens: event.usage?.input_tokens || prev.inputTokens,
-          outputTokens: event.usage?.output_tokens || prev.outputTokens,
-          cacheReadTokens: event.usage?.cache_read_input_tokens || prev.cacheReadTokens,
-          cacheCreationTokens: event.usage?.cache_creation_input_tokens || prev.cacheCreationTokens,
-          numTurns: event.num_turns || prev.numTurns,
+          totalCost: event.total_cost_usd ?? prev.totalCost,
+          // Use modelUsage for accurate per-turn counts, fallback to usage
+          inputTokens: primaryUsage?.inputTokens ?? effectiveInput ?? prev.inputTokens,
+          outputTokens: primaryUsage?.outputTokens ?? event.usage?.output_tokens ?? prev.outputTokens,
+          cacheReadTokens: primaryUsage?.cacheReadInputTokens ?? event.usage?.cache_read_input_tokens ?? prev.cacheReadTokens,
+          cacheCreationTokens: primaryUsage?.cacheCreationInputTokens ?? event.usage?.cache_creation_input_tokens ?? prev.cacheCreationTokens,
+          numTurns: event.num_turns ?? prev.numTurns,
+          contextWindow: primaryUsage?.contextWindow ?? prev.contextWindow,
           isCompacting: false,
         }));
       }
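
The new code reads inputTokens, outputTokens, cacheReadInputTokens, cacheCreationInputTokens
and contextWindow off each modelUsage entry, plus snake_case aggregates off event.usage.
Inferred from those accesses alone (not from a published SDK type), the result event looks
roughly like this:

```ts
// Approximation of the result event, reconstructed from the property
// accesses in the diff above; not the SDK's authoritative type.
interface ModelUsageEntry {
  inputTokens: number;
  outputTokens: number;
  cacheReadInputTokens: number;
  cacheCreationInputTokens: number;
  contextWindow: number;
}

interface ResultEvent {
  total_cost_usd?: number;
  num_turns?: number;
  // Aggregate usage in snake_case, read via event.usage?.*
  usage?: {
    input_tokens?: number;
    output_tokens?: number;
    cache_read_input_tokens?: number;
    cache_creation_input_tokens?: number;
  };
  // Per-model stats keyed by model id; the diff prefers keys
  // containing "opus" or "sonnet" when picking the primary model.
  modelUsage?: Record<string, ModelUsageEntry>;
}
```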