fix: improve telemetry accuracy with modelUsage data

- Extract token counts from modelUsage (per-model stats) instead of
  basic usage object for accurate values
- Add contextWindow from modelUsage to calculate proper context %
- Show "X% used" when > 30% free, "X% left" when running low
- Color coding: green (ok), yellow (<30% left), red (<15%), pulsing (<5%)
- Fix totalTokens undefined error (renamed to contextUsed)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
2025-12-18 07:29:29 +01:00
parent ea7ea9c5f8
commit a91ba61dd8
2 changed files with 39 additions and 17 deletions

View File

@@ -22,6 +22,7 @@ export function StatusBar({ sessionStats, isProcessing, connected, permissionMod
cacheReadTokens = 0, cacheReadTokens = 0,
cacheCreationTokens = 0, cacheCreationTokens = 0,
numTurns = 0, numTurns = 0,
contextWindow = 200000,
isCompacting = false, isCompacting = false,
} = sessionStats || {}; } = sessionStats || {};
@@ -29,10 +30,12 @@ export function StatusBar({ sessionStats, isProcessing, connected, permissionMod
const currentMode = PERMISSION_MODES.find(m => m.value === permissionMode) || PERMISSION_MODES[0]; const currentMode = PERMISSION_MODES.find(m => m.value === permissionMode) || PERMISSION_MODES[0];
const ModeIcon = currentMode.icon; const ModeIcon = currentMode.icon;
// Calculate total tokens and estimate context usage // Calculate context usage
const totalTokens = inputTokens + outputTokens; // inputTokens from modelUsage represents tokens in current context (including cache reads)
// Claude has ~200k context, but we show relative usage // contextWindow is the model's max context size (e.g., 200000 for opus)
const contextPercent = Math.min(100, (inputTokens / 200000) * 100); const contextUsed = inputTokens + outputTokens;
const contextPercent = contextWindow > 0 ? Math.min(100, (contextUsed / contextWindow) * 100) : 0;
const contextRemaining = Math.max(0, 100 - contextPercent);
// Format cost // Format cost
const formatCost = (cost) => { const formatCost = (cost) => {
@@ -137,7 +140,7 @@ export function StatusBar({ sessionStats, isProcessing, connected, permissionMod
{/* Right side: Token usage */} {/* Right side: Token usage */}
<div className="flex items-center gap-4"> <div className="flex items-center gap-4">
{/* Token counts */} {/* Token counts */}
{totalTokens > 0 && ( {contextUsed > 0 && (
<div className="flex items-center gap-3 text-dark-400"> <div className="flex items-center gap-3 text-dark-400">
<span className="flex items-center gap-1"> <span className="flex items-center gap-1">
<span className="text-dark-500">In:</span> <span className="text-dark-500">In:</span>
@@ -156,15 +159,18 @@ export function StatusBar({ sessionStats, isProcessing, connected, permissionMod
</div> </div>
)} )}
{/* Context status - simple text based on remaining context */} {/* Context status - shows remaining context percentage */}
<div className="flex items-center gap-2"> <div className="flex items-center gap-2">
<span className="text-dark-500">Context:</span> <span className="text-dark-500">Context:</span>
{inputTokens > 0 ? ( {contextUsed > 0 ? (
<span className={`${ <span className={`${
contextPercent >= 95 ? 'text-red-400 font-medium' : contextRemaining <= 5 ? 'text-red-400 font-medium animate-pulse' :
contextPercent >= 85 ? 'text-yellow-400' : 'text-green-400' contextRemaining <= 15 ? 'text-red-400' :
contextRemaining <= 30 ? 'text-yellow-400' : 'text-green-400'
}`}> }`}>
{contextPercent >= 85 ? `${(100 - contextPercent).toFixed(0)}% left` : 'ok'} {contextRemaining <= 30
? `${contextRemaining.toFixed(0)}% left`
: `${contextPercent.toFixed(0)}% used`}
</span> </span>
) : ( ) : (
<span className="text-green-400">ok</span> <span className="text-green-400">ok</span>

View File

@@ -32,6 +32,7 @@ export function useClaudeSession() {
cacheReadTokens: 0, cacheReadTokens: 0,
cacheCreationTokens: 0, cacheCreationTokens: 0,
numTurns: 0, numTurns: 0,
contextWindow: 200000, // Default, updated from modelUsage
isCompacting: false, isCompacting: false,
}); });
@@ -361,14 +362,29 @@ export function useClaudeSession() {
setIsProcessing(false); setIsProcessing(false);
// Update session stats from result event // Update session stats from result event
if (event.usage || event.total_cost_usd !== undefined) { // Claude sends detailed modelUsage with per-model stats including contextWindow
// usage contains: input_tokens (new), cache_read_input_tokens, cache_creation_input_tokens, output_tokens
if (event.usage || event.modelUsage || event.total_cost_usd !== undefined) {
// Get the primary model's usage (usually claude-opus or claude-sonnet)
const modelUsage = event.modelUsage || {};
const primaryModel = Object.keys(modelUsage).find(k => k.includes('opus') || k.includes('sonnet'))
|| Object.keys(modelUsage)[0];
const primaryUsage = primaryModel ? modelUsage[primaryModel] : null;
// Calculate effective input tokens (what's actually in context)
// This is: new input + cache read (cache creation doesn't count towards context limit)
const effectiveInput = (event.usage?.input_tokens || 0) +
(event.usage?.cache_read_input_tokens || 0);
setSessionStats(prev => ({ setSessionStats(prev => ({
totalCost: event.total_cost_usd || prev.totalCost, totalCost: event.total_cost_usd ?? prev.totalCost,
inputTokens: event.usage?.input_tokens || prev.inputTokens, // Use modelUsage for accurate per-turn counts, fallback to usage
outputTokens: event.usage?.output_tokens || prev.outputTokens, inputTokens: primaryUsage?.inputTokens ?? effectiveInput ?? prev.inputTokens,
cacheReadTokens: event.usage?.cache_read_input_tokens || prev.cacheReadTokens, outputTokens: primaryUsage?.outputTokens ?? event.usage?.output_tokens ?? prev.outputTokens,
cacheCreationTokens: event.usage?.cache_creation_input_tokens || prev.cacheCreationTokens, cacheReadTokens: primaryUsage?.cacheReadInputTokens ?? event.usage?.cache_read_input_tokens ?? prev.cacheReadTokens,
numTurns: event.num_turns || prev.numTurns, cacheCreationTokens: primaryUsage?.cacheCreationInputTokens ?? event.usage?.cache_creation_input_tokens ?? prev.cacheCreationTokens,
numTurns: event.num_turns ?? prev.numTurns,
contextWindow: primaryUsage?.contextWindow ?? prev.contextWindow,
isCompacting: false, isCompacting: false,
})); }));
} }