File detail
utils/contextSuggestions.ts
Use case
This file lives under `utils/`, which covers cross-cutting helpers (shell, tempfiles, settings, messages, process input, …). On the API surface it exposes SuggestionSeverity, ContextSuggestion, and generateContextSuggestions — mainly types, interfaces, or factory objects. It composes internal code from tools, analyzeContext, file, and format (relative imports).
Generated from folder role, exports, dependency roots, and inline comments — not hand-reviewed for every path.
Inline summary
import { BASH_TOOL_NAME } from '../tools/BashTool/toolName.js' import { FILE_READ_TOOL_NAME } from '../tools/FileReadTool/prompt.js' import { GREP_TOOL_NAME } from '../tools/GrepTool/prompt.js' import { WEB_FETCH_TOOL_NAME } from '../tools/WebFetchTool/prompt.js' import type { ContextData } from './analyzeContext.js'
Exports (heuristic)
SuggestionSeverity, ContextSuggestion, generateContextSuggestions
Source preview
import { BASH_TOOL_NAME } from '../tools/BashTool/toolName.js'
import { FILE_READ_TOOL_NAME } from '../tools/FileReadTool/prompt.js'
import { GREP_TOOL_NAME } from '../tools/GrepTool/prompt.js'
import { WEB_FETCH_TOOL_NAME } from '../tools/WebFetchTool/prompt.js'
import type { ContextData } from './analyzeContext.js'
import { getDisplayPath } from './file.js'
import { formatTokens } from './format.js'
// --
/** Urgency of a suggestion; warnings are sorted ahead of infos. */
export type SuggestionSeverity = 'info' | 'warning'
/** One actionable recommendation for reducing context usage. */
export type ContextSuggestion = {
severity: SuggestionSeverity
// Short one-line heading shown to the user
title: string
// Longer explanation describing the concrete action to take
detail: string
/** Estimated tokens that could be saved */
savingsTokens?: number
}
// Thresholds for triggering suggestions
const LARGE_TOOL_RESULT_PERCENT = 15 // tool results > 15% of context
const LARGE_TOOL_RESULT_TOKENS = 10_000 // absolute floor, ANDed with the percent check so small contexts don't warn
const READ_BLOAT_PERCENT = 5 // Read results > 5% of context
const NEAR_CAPACITY_PERCENT = 80 // context usage at which the near-capacity warning fires
const MEMORY_HIGH_PERCENT = 5 // memory files > 5% of context
const MEMORY_HIGH_TOKENS = 5_000 // absolute floor for the memory-bloat check
// --
/**
 * Build a prioritized list of context-usage suggestions for a context
 * snapshot: warnings first, then larger estimated savings first.
 */
export function generateContextSuggestions(
  data: ContextData,
): ContextSuggestion[] {
  const collected: ContextSuggestion[] = []
  const checks = [
    checkNearCapacity,
    checkLargeToolResults,
    checkReadResultBloat,
    checkMemoryBloat,
    checkAutoCompactDisabled,
  ]
  for (const check of checks) {
    check(data, collected)
  }
  // Sort: warnings first, then by savings descending
  collected.sort((left, right) => {
    if (left.severity === right.severity) {
      return (right.savingsTokens ?? 0) - (left.savingsTokens ?? 0)
    }
    return left.severity === 'warning' ? -1 : 1
  })
  return collected
}
// --
// Warn when context usage crosses the near-capacity threshold; the wording
// depends on whether autocompact will kick in on its own.
function checkNearCapacity(
  data: ContextData,
  suggestions: ContextSuggestion[],
): void {
  if (data.percentage < NEAR_CAPACITY_PERCENT) return
  const detail = data.isAutoCompactEnabled
    ? 'Autocompact will trigger soon, which discards older messages. Use /compact now to control what gets kept.'
    : 'Autocompact is disabled. Use /compact to free space, or enable autocompact in /config.'
  suggestions.push({
    severity: 'warning',
    title: `Context is ${data.percentage}% full`,
    detail,
  })
}
// Flag any tool whose combined call + result tokens exceed both the percent
// and absolute thresholds, delegating wording to getLargeToolSuggestion.
function checkLargeToolResults(
  data: ContextData,
  suggestions: ContextSuggestion[],
): void {
  const breakdown = data.messageBreakdown
  if (!breakdown) return
  for (const tool of breakdown.toolCallsByType) {
    const combinedTokens = tool.callTokens + tool.resultTokens
    const share = (combinedTokens / data.rawMaxTokens) * 100
    const exceedsThresholds =
      share >= LARGE_TOOL_RESULT_PERCENT &&
      combinedTokens >= LARGE_TOOL_RESULT_TOKENS
    if (!exceedsThresholds) continue
    const suggestion = getLargeToolSuggestion(tool.name, combinedTokens, share)
    if (suggestion) {
      suggestions.push(suggestion)
    }
  }
}
// Map a known tool name to a tailored suggestion; unknown tools only get a
// generic note when they consume at least 20% of the context.
function getLargeToolSuggestion(
  toolName: string,
  tokens: number,
  percent: number,
): ContextSuggestion | null {
  const tokenStr = formatTokens(tokens)
  const pct = percent.toFixed(0)
  if (toolName === BASH_TOOL_NAME) {
    return {
      severity: 'warning',
      title: `Bash results using ${tokenStr} tokens (${pct}%)`,
      detail:
        'Pipe output through head, tail, or grep to reduce result size. Avoid cat on large files \u2014 use Read with offset/limit instead.',
      savingsTokens: Math.floor(tokens * 0.5),
    }
  }
  if (toolName === FILE_READ_TOOL_NAME) {
    return {
      severity: 'info',
      title: `Read results using ${tokenStr} tokens (${pct}%)`,
      detail:
        'Use offset and limit parameters to read only the sections you need. Avoid re-reading entire files when you only need a few lines.',
      savingsTokens: Math.floor(tokens * 0.3),
    }
  }
  if (toolName === GREP_TOOL_NAME) {
    return {
      severity: 'info',
      title: `Grep results using ${tokenStr} tokens (${pct}%)`,
      detail:
        'Add more specific patterns or use the glob or type parameter to narrow file types. Consider Glob for file discovery instead of Grep.',
      savingsTokens: Math.floor(tokens * 0.3),
    }
  }
  if (toolName === WEB_FETCH_TOOL_NAME) {
    return {
      severity: 'info',
      title: `WebFetch results using ${tokenStr} tokens (${pct}%)`,
      detail:
        'Web page content can be very large. Consider extracting only the specific information needed.',
      savingsTokens: Math.floor(tokens * 0.4),
    }
  }
  // Unrecognized tool: only surface it once it dominates the context.
  if (percent < 20) return null
  return {
    severity: 'info',
    title: `${toolName} using ${tokenStr} tokens (${pct}%)`,
    detail: `This tool is consuming a significant portion of context.`,
    savingsTokens: Math.floor(tokens * 0.2),
  }
}
// Surface moderate Read-result bloat that falls below the large-tool-result
// thresholds (which checkLargeToolResults already reports on).
function checkReadResultBloat(
  data: ContextData,
  suggestions: ContextSuggestion[],
): void {
  const breakdown = data.messageBreakdown
  if (!breakdown) return
  const readTool = breakdown.toolCallsByType.find(
    entry => entry.name === FILE_READ_TOOL_NAME,
  )
  if (!readTool) return
  const combinedTokens = readTool.callTokens + readTool.resultTokens
  const combinedPercent = (combinedTokens / data.rawMaxTokens) * 100
  const resultPercent = (readTool.resultTokens / data.rawMaxTokens) * 100
  // Skip if already covered by checkLargeToolResults (>= 15% band)
  const coveredByLargeToolCheck =
    combinedPercent >= LARGE_TOOL_RESULT_PERCENT &&
    combinedTokens >= LARGE_TOOL_RESULT_TOKENS
  if (coveredByLargeToolCheck) return
  const isBloated =
    resultPercent >= READ_BLOAT_PERCENT &&
    readTool.resultTokens >= LARGE_TOOL_RESULT_TOKENS
  if (!isBloated) return
  suggestions.push({
    severity: 'info',
    title: `File reads using ${formatTokens(readTool.resultTokens)} tokens (${resultPercent.toFixed(0)}%)`,
    detail:
      'If you are re-reading files, consider referencing earlier reads. Use offset/limit for large files.',
    savingsTokens: Math.floor(readTool.resultTokens * 0.3),
  })
}
// Suggest pruning memory files when they collectively exceed both the
// percent and absolute token thresholds; names the three largest offenders.
function checkMemoryBloat(
  data: ContextData,
  suggestions: ContextSuggestion[],
): void {
  let totalMemoryTokens = 0
  for (const file of data.memoryFiles) {
    totalMemoryTokens += file.tokens
  }
  const memoryPercent = (totalMemoryTokens / data.rawMaxTokens) * 100
  const overBudget =
    memoryPercent >= MEMORY_HIGH_PERCENT &&
    totalMemoryTokens >= MEMORY_HIGH_TOKENS
  if (!overBudget) return
  // Copy before sorting so data.memoryFiles is left untouched.
  const byTokensDescending = [...data.memoryFiles].sort(
    (a, b) => b.tokens - a.tokens,
  )
  const largestFiles = byTokensDescending
    .slice(0, 3)
    .map(f => `${getDisplayPath(f.path)} (${formatTokens(f.tokens)})`)
    .join(', ')
  suggestions.push({
    severity: 'info',
    title: `Memory files using ${formatTokens(totalMemoryTokens)} tokens (${memoryPercent.toFixed(0)}%)`,
    detail: `Largest: ${largestFiles}. Use /memory to review and prune stale entries.`,
    savingsTokens: Math.floor(totalMemoryTokens * 0.3),
  })
}
// Nudge the user to enable autocompact while usage is in the 50-80% band;
// at or above NEAR_CAPACITY_PERCENT the stronger warning takes over.
function checkAutoCompactDisabled(
  data: ContextData,
  suggestions: ContextSuggestion[],
): void {
  if (data.isAutoCompactEnabled) return
  const inNudgeBand =
    data.percentage >= 50 && data.percentage < NEAR_CAPACITY_PERCENT
  if (!inNudgeBand) return
  suggestions.push({
    severity: 'info',
    title: 'Autocompact is disabled',
    detail:
      'Without autocompact, you will hit context limits and lose the conversation. Enable it in /config or use /compact manually.',
  })
}