mirror of
https://github.com/danny-avila/LibreChat.git
synced 2026-05-14 08:27:49 +00:00
* 🛡️ fix: Strict opt-in skills activation per agent
Skills were activating on every agent run that had the capability +
RBAC enabled, regardless of whether the user (ephemeral) or author
(persisted) had opted in. `scopeSkillIds(undefined)` fell through to
"full accessible catalog" whenever `agent.skills` was unset, which is
the default state for any agent created before skills existed and for
every ephemeral agent.
Activation now requires an explicit signal:
- Ephemeral agent → per-conversation skills badge toggle.
- Persisted agent → new `skills_enabled` master switch on the agent
doc, surfaced as a toggle in the Agent Builder skills section.
Enabled + empty/undefined allowlist = full accessible catalog;
enabled + non-empty allowlist = narrow to those ids; disabled (or
undefined) = no skills available, even if an allowlist is set.
Centralised the predicate in `resolveAgentScopedSkillIds` so the
primary-agent path, handoff/discovery, the subagent loop, and both
OpenAI controllers all share one source of truth. Frontend `$`
popover scope mirrors the same logic so the UI never offers skills
the backend would refuse to activate.
* test: mock resolveAgentScopedSkillIds in agent controller specs
* refactor: address review findings on skills opt-in PR
- AgentConfig: associate skills label with toggle via htmlFor for
click/keyboard affordance; simplify Switch handler to Boolean(value).
- skills: mark scopeSkillIds as @internal so runtime callers continue
to route through resolveAgentScopedSkillIds and inherit the activation
predicate (ephemeral toggle, persisted skills_enabled).
* fix(agents): include skills_enabled in agent list projection
Without this field, agents loaded via the list endpoint hydrate into the
client agentsMap with skills_enabled === undefined, causing the `$`
skill popover to hide every skill on a fresh page load even when the
agent was saved with skills_enabled: true.
* fix(skills): fail closed for persisted agents during agentsMap hydration
Returning undefined while the agents map loads let the popover render the
full catalog for a persisted agent before we could read its
skills_enabled flag, so the user could pick a skill the backend would
then refuse for the turn. Match the strict opt-in contract by returning
[] until the map is authoritative.
* refactor(skills): extract skillsHintKey for readability
Replaces the nested ternary in the skills section JSX with a
pre-computed constant so the activation -> hint key mapping reads
top-down.
* refactor(skills): unflatten skillsHintKey to remove nested ternary
1153 lines
37 KiB
JavaScript
1153 lines
37 KiB
JavaScript
const { nanoid } = require('nanoid');
|
|
const { v4: uuidv4 } = require('uuid');
|
|
const { logger } = require('@librechat/data-schemas');
|
|
const { Callback, ToolEndHandler, formatAgentMessages } = require('@librechat/agents');
|
|
const {
|
|
EModelEndpoint,
|
|
ResourceType,
|
|
PermissionBits,
|
|
hasPermissions,
|
|
AgentCapabilities,
|
|
} = require('librechat-data-provider');
|
|
const {
|
|
createRun,
|
|
buildToolSet,
|
|
loadSkillStates,
|
|
resolveAgentScopedSkillIds,
|
|
createSafeUser,
|
|
initializeAgent,
|
|
getBalanceConfig,
|
|
recordCollectedUsage,
|
|
getTransactionsConfig,
|
|
extractManualSkills,
|
|
injectSkillPrimes,
|
|
createToolExecuteHandler,
|
|
discoverConnectedAgents,
|
|
getRemoteAgentPermissions,
|
|
// Responses API
|
|
writeDone,
|
|
buildResponse,
|
|
generateResponseId,
|
|
isValidationFailure,
|
|
emitResponseCreated,
|
|
createResponseContext,
|
|
createResponseTracker,
|
|
setupStreamingResponse,
|
|
emitResponseInProgress,
|
|
convertInputToMessages,
|
|
validateResponseRequest,
|
|
buildAggregatedResponse,
|
|
createResponseAggregator,
|
|
sendResponsesErrorResponse,
|
|
createResponsesEventHandlers,
|
|
createAggregatorEventHandlers,
|
|
} = require('@librechat/api');
|
|
const {
|
|
createResponsesToolEndCallback,
|
|
buildSummarizationHandlers,
|
|
markSummarizationUsage,
|
|
createToolEndCallback,
|
|
agentLogHandlerObj,
|
|
} = require('~/server/controllers/agents/callbacks');
|
|
const { loadAgentTools, loadToolsForExecution } = require('~/server/services/ToolService');
|
|
const {
|
|
findAccessibleResources,
|
|
getEffectivePermissions,
|
|
} = require('~/server/services/PermissionService');
|
|
const {
|
|
getSkillToolDeps,
|
|
enrichWithSkillConfigurable,
|
|
buildSkillPrimedIdsByName,
|
|
} = require('~/server/services/Endpoints/agents/skillDeps');
|
|
const { getModelsConfig } = require('~/server/controllers/ModelController');
|
|
const { logViolation } = require('~/cache');
|
|
const db = require('~/models');
|
|
|
|
/**
 * Builds a tool-loading closure bound to a single abort signal.
 *
 * @param {AbortSignal} signal - The abort signal propagated into tool loading
 * @param {boolean} [definitionsOnly=true] - When true, returns only serializable
 * tool definitions without creating full tool instances (for event-driven mode)
 * @returns {Function} Async loader; resolves with loaded tools, or `undefined`
 * when loading fails (the error is logged, not rethrown).
 */
function createToolLoader(signal, definitionsOnly = true) {
  return async function loadTools(params) {
    const { req, res, tools, model, agentId, provider, tool_options, tool_resources } = params;
    const agent = { id: agentId, tools, provider, model, tool_options };
    try {
      return await loadAgentTools({
        req,
        res,
        agent,
        signal,
        tool_resources,
        definitionsOnly,
        streamId: null,
      });
    } catch (error) {
      // Best-effort: log and fall through so the caller receives `undefined`.
      logger.error('Error loading tools for agent ' + agentId, error);
    }
  };
}
|
|
|
|
/**
 * Thin adapter from Open Responses input items to LibreChat's internal
 * message format; delegates entirely to `convertInputToMessages`.
 *
 * @param {import('@librechat/api').InputItem[]} input - Open Responses input
 * @returns {Array} Internal messages
 */
function convertToInternalMessages(input) {
  const internalMessages = convertInputToMessages(input);
  return internalMessages;
}
|
|
|
|
/**
 * Load stored messages for a previous response/conversation and convert
 * them into the internal message format.
 *
 * @param {string} conversationId - The conversation/response ID
 * @param {string} userId - The user ID
 * @returns {Promise<Array>} Internal messages; `[]` when there are none or on error
 */
async function loadPreviousMessages(conversationId, userId) {
  try {
    const stored = await db.getMessages({ conversationId, user: userId });
    if (!stored || stored.length === 0) {
      return [];
    }

    // Maps one stored DB message to the internal shape.
    const toInternal = (msg) => {
      // Content may be a plain string, an array of content parts, or some
      // other truthy value that is stringified as a fallback.
      let content = '';
      if (typeof msg.text === 'string') {
        content = msg.text;
      } else if (Array.isArray(msg.content)) {
        content = msg.content;
      } else if (msg.text) {
        content = String(msg.text);
      }

      return {
        role: msg.isCreatedByUser ? 'user' : 'assistant',
        content,
        messageId: msg.messageId,
      };
    };

    return stored.map(toInternal);
  } catch (error) {
    logger.error('[Responses API] Error loading previous messages:', error);
    return [];
  }
}
|
|
|
|
/**
 * Persist the user-authored input messages of a turn to the database.
 * Non-user messages are skipped; saves run sequentially to keep DB order.
 *
 * @param {import('express').Request} req
 * @param {string} conversationId
 * @param {Array} inputMessages - Internal format messages
 * @param {string} agentId
 * @returns {Promise<void>}
 */
async function saveInputMessages(req, conversationId, inputMessages, agentId) {
  const userMessages = inputMessages.filter((msg) => msg.role === 'user');
  for (const msg of userMessages) {
    // Non-string content (content-part arrays) is stored as JSON text.
    const text = typeof msg.content === 'string' ? msg.content : JSON.stringify(msg.content);
    await db.saveMessage(
      req,
      {
        messageId: msg.messageId || nanoid(),
        conversationId,
        parentMessageId: null,
        isCreatedByUser: true,
        text,
        sender: 'User',
        endpoint: EModelEndpoint.agents,
        model: agentId,
      },
      { context: 'Responses API - save user input' },
    );
  }
}
|
|
|
|
/**
 * Persist the assistant's final response text to the database.
 * Concatenates every `output_text` part of every `message` output item.
 *
 * @param {import('express').Request} req
 * @param {string} conversationId
 * @param {string} responseId
 * @param {import('@librechat/api').Response} response
 * @param {string} agentId
 * @returns {Promise<void>}
 */
async function saveResponseOutput(req, conversationId, responseId, response, agentId) {
  // Collect output_text fragments in order, then join once.
  const textParts = [];
  for (const item of response.output) {
    if (item.type !== 'message' || !item.content) {
      continue;
    }
    for (const part of item.content) {
      if (part.type === 'output_text' && part.text) {
        textParts.push(part.text);
      }
    }
  }
  const responseText = textParts.join('');

  // Save the assistant message
  await db.saveMessage(
    req,
    {
      messageId: responseId,
      conversationId,
      parentMessageId: null,
      isCreatedByUser: false,
      text: responseText,
      sender: 'Agent',
      endpoint: EModelEndpoint.agents,
      model: agentId,
      finish_reason: response.status === 'completed' ? 'stop' : response.status,
      tokenCount: response.usage?.output_tokens,
    },
    { context: 'Responses API - save assistant response' },
  );
}
|
|
|
|
/**
 * Create or update the conversation record for this response turn.
 *
 * @param {import('express').Request} req
 * @param {string} conversationId
 * @param {string} agentId
 * @param {object} agent
 * @returns {Promise<void>}
 */
async function saveConversation(req, conversationId, agentId, agent) {
  // Request-derived context for the save operation.
  const saveContext = {
    userId: req?.user?.id,
    isTemporary: req?.body?.isTemporary,
    interfaceConfig: req?.config?.interfaceConfig,
  };
  // Conversation document fields.
  const convoFields = {
    conversationId,
    endpoint: EModelEndpoint.agents,
    agentId,
    title: agent?.name || 'Open Responses Conversation',
    model: agent?.model,
  };
  await db.saveConvo(saveContext, convoFields, { context: 'Responses API - save conversation' });
}
|
|
|
|
/**
 * Convert stored messages to Open Responses output items.
 * Only assistant-authored messages are included; each becomes a single
 * completed `message` item carrying one `output_text` part.
 *
 * @param {Array} messages - Stored messages
 * @returns {Array} Output items
 */
function convertMessagesToOutputItems(messages) {
  return messages
    .filter((msg) => !msg.isCreatedByUser)
    .map((msg) => ({
      type: 'message',
      id: msg.messageId,
      role: 'assistant',
      status: 'completed',
      content: [
        {
          type: 'output_text',
          text: msg.text || '',
          annotations: [],
        },
      ],
    }));
}
|
|
|
|
/**
 * Create Response - POST /v1/responses
 *
 * Creates a model response following the Open Responses API specification.
 * Supports both streaming and non-streaming responses.
 *
 * @param {import('express').Request} req
 * @param {import('express').Response} res
 */
const createResponse = async (req, res) => {
  const appConfig = req.config;
  const requestStartTime = Date.now();

  // Validate request
  const validation = validateResponseRequest(req.body);
  if (isValidationFailure(validation)) {
    return sendResponsesErrorResponse(res, 400, validation.error);
  }

  const request = validation.request;
  const agentId = request.model;
  const isStreaming = request.stream === true;
  const summarizationConfig = appConfig?.summarization;

  // Look up the agent
  const agent = await db.getAgent({ id: agentId });
  if (!agent) {
    return sendResponsesErrorResponse(
      res,
      404,
      `Agent not found: ${agentId}`,
      'not_found',
      'model_not_found',
    );
  }

  // Generate IDs
  const responseId = generateResponseId();
  const context = createResponseContext(request, responseId);

  logger.debug(
    `[Responses API] Request ${responseId} started for agent ${agentId}, stream: ${isStreaming}`,
  );

  // Set up abort controller
  const abortController = new AbortController();

  // Handle client disconnect
  req.on('close', () => {
    if (!abortController.signal.aborted) {
      abortController.abort();
      logger.debug('[Responses API] Client disconnected, aborting');
    }
  });

  try {
    if (request.previous_response_id != null) {
      if (typeof request.previous_response_id !== 'string') {
        return sendResponsesErrorResponse(
          res,
          400,
          'previous_response_id must be a string',
          'invalid_request',
        );
      }
      if (!(await db.getConvo(req.user?.id, request.previous_response_id))) {
        return sendResponsesErrorResponse(res, 404, 'Conversation not found', 'not_found');
      }
    }

    const conversationId = request.previous_response_id ?? uuidv4();
    const parentMessageId = null;

    // Build allowed providers set
    const allowedProviders = new Set(
      appConfig?.endpoints?.[EModelEndpoint.agents]?.allowedProviders,
    );

    // Create tool loader
    const loadTools = createToolLoader(abortController.signal);

    // Initialize the agent first to check for disableStreaming
    const endpointOption = {
      endpoint: agent.provider,
      model_parameters: agent.model_parameters ?? {},
    };

    // `filterFilesByAgentAccess` is intentionally omitted: it calls
    // `checkPermission` with `resourceType: AGENT`, but this route
    // authorizes callers through `REMOTE_AGENT` (via
    // `getRemoteAgentPermissions`), so including it would silently drop
    // owner-attached context files for any remote user who has
    // `REMOTE_AGENT_VIEWER` but not direct `AGENT_VIEW`.
    const dbMethods = {
      getConvoFiles: db.getConvoFiles,
      getFiles: db.getFiles,
      getUserKey: db.getUserKey,
      getMessages: db.getMessages,
      updateFilesUsage: db.updateFilesUsage,
      getUserKeyValues: db.getUserKeyValues,
      getUserCodeFiles: db.getUserCodeFiles,
      getToolFilesByIds: db.getToolFilesByIds,
      getCodeGeneratedFiles: db.getCodeGeneratedFiles,
      listSkillsByAccess: db.listSkillsByAccess,
      listAlwaysApplySkills: db.listAlwaysApplySkills,
      getSkillByName: db.getSkillByName,
    };

    const enabledCapabilities = new Set(
      appConfig?.endpoints?.[EModelEndpoint.agents]?.capabilities,
    );
    const skillsCapabilityEnabled = enabledCapabilities.has(AgentCapabilities.skills);
    const ephemeralSkillsToggle = req.body?.ephemeralAgent?.skills === true;
    const accessibleSkillIds = skillsCapabilityEnabled
      ? await findAccessibleResources({
          userId: req.user.id,
          role: req.user.role,
          resourceType: ResourceType.SKILL,
          requiredPermissions: PermissionBits.VIEW,
        })
      : [];

    const { skillStates, defaultActiveOnShare } = await loadSkillStates({
      userId: req.user.id,
      appConfig,
      getUserById: db.getUserById,
      accessibleSkillIds,
    });

    const manualSkills = extractManualSkills(req.body);

    const primaryConfig = await initializeAgent(
      {
        req,
        res,
        loadTools,
        requestFiles: [],
        conversationId,
        parentMessageId,
        agent,
        endpointOption,
        allowedProviders,
        isInitialAgent: true,
        accessibleSkillIds: resolveAgentScopedSkillIds({
          agent,
          accessibleSkillIds,
          skillsCapabilityEnabled,
          ephemeralSkillsToggle,
        }),
        codeEnvAvailable: enabledCapabilities.has(AgentCapabilities.execute_code),
        skillStates,
        defaultActiveOnShare,
        manualSkills,
      },
      dbMethods,
    );

    /**
     * Per-agent tool-execution context map, keyed by agentId. Ensures the
     * ON_TOOL_EXECUTE callback routes each sub-agent's tool calls to the
     * correct toolRegistry / userMCPAuthMap / tool_resources.
     * @type {Map<string, {
     *   agent: object,
     *   toolRegistry?: import('@librechat/agents').LCToolRegistry,
     *   userMCPAuthMap?: Record<string, Record<string, string>>,
     *   tool_resources?: object,
     *   actionsEnabled?: boolean,
     * }>}
     */
    const agentToolContexts = new Map();
    agentToolContexts.set(primaryConfig.id, {
      agent,
      toolRegistry: primaryConfig.toolRegistry,
      userMCPAuthMap: primaryConfig.userMCPAuthMap,
      tool_resources: primaryConfig.tool_resources,
      actionsEnabled: primaryConfig.actionsEnabled,
      codeEnvAvailable: primaryConfig.codeEnvAvailable,
    });

    // Only run BFS discovery (and pay `getModelsConfig` upfront) when the
    // primary has edges to follow — the common API case is single-agent.
    let handoffAgentConfigs = new Map();
    let discoveredEdges = [];
    let discoveredMCPAuthMap;
    if (primaryConfig.edges?.length) {
      const modelsConfig = await getModelsConfig(req);
      ({
        agentConfigs: handoffAgentConfigs,
        edges: discoveredEdges,
        userMCPAuthMap: discoveredMCPAuthMap,
      } = await discoverConnectedAgents(
        {
          req,
          res,
          primaryConfig,
          endpointOption,
          allowedProviders,
          modelsConfig,
          loadTools,
          requestFiles: [],
          conversationId,
          parentMessageId,
          // The route enforces REMOTE_AGENT on the primary; every discovered
          // sub-agent must clear the same sharing boundary, not the looser
          // in-app AGENT one.
          resourceType: ResourceType.REMOTE_AGENT,
          /** @see DiscoverConnectedAgentsParams.codeEnvAvailable */
          codeEnvAvailable: enabledCapabilities.has(AgentCapabilities.execute_code),
        },
        {
          getAgent: db.getAgent,
          // Use `getRemoteAgentPermissions` so sub-agent authorization
          // matches what the route's `createCheckRemoteAgentAccess`
          // middleware does for the primary: AGENT owners with the SHARE
          // bit are treated as remotely authorized even without an
          // explicit REMOTE_AGENT grant.
          checkPermission: async ({ userId, role, resourceId, requiredPermission }) => {
            const permissions = await getRemoteAgentPermissions(
              { getEffectivePermissions },
              userId,
              role,
              resourceId,
            );
            return hasPermissions(permissions, requiredPermission);
          },
          logViolation,
          db: dbMethods,
          onAgentInitialized: (agentId, handoffAgent, config) => {
            agentToolContexts.set(agentId, {
              agent: handoffAgent,
              toolRegistry: config.toolRegistry,
              userMCPAuthMap: config.userMCPAuthMap,
              tool_resources: config.tool_resources,
              actionsEnabled: config.actionsEnabled,
              codeEnvAvailable: config.codeEnvAvailable,
            });
          },
          initializeAgent,
        },
      ));
    }

    primaryConfig.edges = discoveredEdges;
    const runAgents = [primaryConfig, ...handoffAgentConfigs.values()];
    const mergedMCPAuthMap = discoveredMCPAuthMap ?? primaryConfig.userMCPAuthMap;

    // Determine if streaming is enabled (check both request and agent config)
    const streamingDisabled = !!primaryConfig.model_parameters?.disableStreaming;
    const actuallyStreaming = isStreaming && !streamingDisabled;

    // Load previous messages if previous_response_id is provided
    let previousMessages = [];
    if (request.previous_response_id) {
      const userId = req.user?.id ?? 'api-user';
      previousMessages = await loadPreviousMessages(request.previous_response_id, userId);
    }

    // Convert input to internal messages. The previous
    // `typeof request.input === 'string' ? request.input : request.input`
    // ternary was a no-op (both branches identical) — `convertInputToMessages`
    // handles string and item-array input alike.
    const inputMessages = convertToInternalMessages(request.input);

    // Merge previous messages with new input
    const allMessages = [...previousMessages, ...inputMessages];

    const toolSet = buildToolSet(primaryConfig);
    const formatted = formatAgentMessages(allMessages, {}, toolSet);
    const formattedMessages = formatted.messages;
    const initialSummary = formatted.summary;
    let indexTokenCountMap = formatted.indexTokenCountMap;

    /**
     * Inject manual + always-apply skill primes so the model sees SKILL.md
     * bodies for this turn — parity with AgentClient's chat path. The
     * Responses API uses its own response-builder shape, so LibreChat-
     * style card SSE events don't apply; only the message-context part
     * carries over.
     */
    const manualSkillPrimes = primaryConfig.manualSkillPrimes;
    const alwaysApplySkillPrimes = primaryConfig.alwaysApplySkillPrimes;
    if (
      (manualSkillPrimes && manualSkillPrimes.length > 0) ||
      (alwaysApplySkillPrimes && alwaysApplySkillPrimes.length > 0)
    ) {
      const primeResult = injectSkillPrimes({
        initialMessages: formattedMessages,
        indexTokenCountMap,
        manualSkillPrimes,
        alwaysApplySkillPrimes,
      });
      indexTokenCountMap = primeResult.indexTokenCountMap;
      /* Surface the cap-driven always-apply truncation at the controller
         layer too — `injectSkillPrimes` already logs internally, but the
         controller-level warn includes endpoint context so operators can
         tell at a glance which path hit the cap. Mirrors AgentClient's
         warn in `client.js`. */
      if (primeResult.alwaysApplyDropped > 0) {
        logger.warn(
          `[Responses API] Dropped ${primeResult.alwaysApplyDropped} always-apply prime(s) to stay within MAX_PRIMED_SKILLS_PER_TURN.`,
        );
      }
    }

    /* Stable for the turn: the prime lists are fixed once
       `initializeAgent` resolves. Hoisted here so both the streaming
       and non-streaming `loadTools` closures below reuse it without
       recomputing per tool execution. `codeEnvAvailable` is read
       per-agent from the stored tool context (admin cap AND that
       agent's `tools` list includes `execute_code`) — a skills-only
       agent never gains sandbox access even if the admin enabled the
       capability globally. */
    const skillPrimedIdsByName = buildSkillPrimedIdsByName(
      manualSkillPrimes,
      alwaysApplySkillPrimes,
    );

    // Create tracker for streaming or aggregator for non-streaming
    const tracker = actuallyStreaming ? createResponseTracker() : null;
    const aggregator = actuallyStreaming ? null : createResponseAggregator();

    // Set up response for streaming
    if (actuallyStreaming) {
      setupStreamingResponse(res);

      // Create handler config
      const handlerConfig = {
        res,
        context,
        tracker,
      };

      // Emit response.created then response.in_progress per Open Responses spec
      emitResponseCreated(handlerConfig);
      emitResponseInProgress(handlerConfig);

      // Create event handlers
      const { handlers: responsesHandlers, finalizeStream } =
        createResponsesEventHandlers(handlerConfig);

      // Collect usage for balance tracking
      const collectedUsage = [];

      // Artifact promises for processing tool outputs
      /** @type {Promise<import('librechat-data-provider').TAttachment | null>[]} */
      const artifactPromises = [];
      // Use Responses API-specific callback that emits librechat:attachment events
      const toolEndCallback = createResponsesToolEndCallback({
        req,
        res,
        tracker,
        artifactPromises,
      });

      // Create tool execute options for event-driven tool execution
      const toolExecuteOptions = {
        loadTools: async (toolNames, agentId) => {
          const ctx =
            agentToolContexts.get(agentId) ?? agentToolContexts.get(primaryConfig.id) ?? {};
          const result = await loadToolsForExecution({
            req,
            res,
            toolNames,
            agent: ctx.agent ?? agent,
            signal: abortController.signal,
            toolRegistry: ctx.toolRegistry,
            userMCPAuthMap: ctx.userMCPAuthMap,
            tool_resources: ctx.tool_resources,
            actionsEnabled: ctx.actionsEnabled,
          });
          return enrichWithSkillConfigurable(
            result,
            req,
            primaryConfig.accessibleSkillIds,
            ctx.codeEnvAvailable === true,
            skillPrimedIdsByName,
          );
        },
        toolEndCallback,
        ...getSkillToolDeps(),
      };

      // Combine handlers
      const handlers = {
        on_message_delta: responsesHandlers.on_message_delta,
        on_reasoning_delta: responsesHandlers.on_reasoning_delta,
        on_run_step: responsesHandlers.on_run_step,
        on_run_step_delta: responsesHandlers.on_run_step_delta,
        on_chat_model_end: {
          handle: (event, data, metadata) => {
            responsesHandlers.on_chat_model_end.handle(event, data);
            const usage = data?.output?.usage_metadata;
            if (usage) {
              const taggedUsage = markSummarizationUsage(usage, metadata);
              collectedUsage.push(taggedUsage);
            }
          },
        },
        on_tool_end: new ToolEndHandler(toolEndCallback, logger),
        on_run_step_completed: { handle: () => {} },
        on_chain_stream: { handle: () => {} },
        on_chain_end: { handle: () => {} },
        on_agent_update: { handle: () => {} },
        on_custom_event: { handle: () => {} },
        on_tool_execute: createToolExecuteHandler(toolExecuteOptions),
        on_agent_log: agentLogHandlerObj,
        ...(summarizationConfig?.enabled !== false
          ? buildSummarizationHandlers({ isStreaming: actuallyStreaming, res })
          : {}),
      };

      // Create and run the agent
      const userId = req.user?.id ?? 'api-user';
      const userMCPAuthMap = mergedMCPAuthMap;

      const run = await createRun({
        agents: runAgents,
        messages: formattedMessages,
        indexTokenCountMap,
        initialSummary,
        runId: responseId,
        summarizationConfig,
        appConfig,
        signal: abortController.signal,
        customHandlers: handlers,
        requestBody: {
          messageId: responseId,
          conversationId,
        },
        user: { id: userId },
      });

      if (!run) {
        throw new Error('Failed to create agent run');
      }

      // Process the stream
      const config = {
        runName: 'AgentRun',
        configurable: {
          thread_id: conversationId,
          user_id: userId,
          user: createSafeUser(req.user),
          requestBody: {
            messageId: responseId,
            conversationId,
          },
          ...(userMCPAuthMap != null && { userMCPAuthMap }),
        },
        signal: abortController.signal,
        streamMode: 'values',
        version: 'v2',
      };

      await run.processStream({ messages: formattedMessages }, config, {
        callbacks: {
          [Callback.TOOL_ERROR]: (graph, error, toolId) => {
            logger.error(`[Responses API] Tool Error "${toolId}"`, error);
          },
        },
      });

      // Record token usage against balance
      const balanceConfig = getBalanceConfig(appConfig);
      const transactionsConfig = getTransactionsConfig(appConfig);
      recordCollectedUsage(
        {
          spendTokens: db.spendTokens,
          spendStructuredTokens: db.spendStructuredTokens,
          pricing: { getMultiplier: db.getMultiplier, getCacheMultiplier: db.getCacheMultiplier },
          bulkWriteOps: { insertMany: db.bulkInsertTransactions, updateBalance: db.updateBalance },
        },
        {
          user: userId,
          conversationId,
          collectedUsage,
          context: 'message',
          messageId: responseId,
          balance: balanceConfig,
          transactions: transactionsConfig,
          model: primaryConfig.model || agent.model_parameters?.model,
        },
      ).catch((err) => {
        logger.error('[Responses API] Error recording usage:', err);
      });

      // Finalize the stream
      finalizeStream();
      res.end();

      const duration = Date.now() - requestStartTime;
      logger.debug(`[Responses API] Request ${responseId} completed in ${duration}ms (streaming)`);

      // Save to database if store: true
      if (request.store === true) {
        try {
          // Save conversation
          await saveConversation(req, conversationId, agentId, agent);

          // Save input messages
          await saveInputMessages(req, conversationId, inputMessages, agentId);

          // Build response for saving (use tracker with buildResponse for streaming)
          const finalResponse = buildResponse(context, tracker, 'completed');
          await saveResponseOutput(req, conversationId, responseId, finalResponse, agentId);

          logger.debug(
            `[Responses API] Stored response ${responseId} in conversation ${conversationId}`,
          );
        } catch (saveError) {
          logger.error('[Responses API] Error saving response:', saveError);
          // Don't fail the request if saving fails
        }
      }

      // Wait for artifact processing after response ends (non-blocking)
      if (artifactPromises.length > 0) {
        Promise.all(artifactPromises).catch((artifactError) => {
          logger.warn('[Responses API] Error processing artifacts:', artifactError);
        });
      }
    } else {
      const aggregatorHandlers = createAggregatorEventHandlers(aggregator);

      // Collect usage for balance tracking
      const collectedUsage = [];

      /** @type {Promise<import('librechat-data-provider').TAttachment | null>[]} */
      const artifactPromises = [];
      const toolEndCallback = createToolEndCallback({ req, res, artifactPromises, streamId: null });

      const toolExecuteOptions = {
        loadTools: async (toolNames, agentId) => {
          const ctx =
            agentToolContexts.get(agentId) ?? agentToolContexts.get(primaryConfig.id) ?? {};
          const result = await loadToolsForExecution({
            req,
            res,
            toolNames,
            agent: ctx.agent ?? agent,
            signal: abortController.signal,
            toolRegistry: ctx.toolRegistry,
            userMCPAuthMap: ctx.userMCPAuthMap,
            tool_resources: ctx.tool_resources,
            actionsEnabled: ctx.actionsEnabled,
          });
          return enrichWithSkillConfigurable(
            result,
            req,
            primaryConfig.accessibleSkillIds,
            ctx.codeEnvAvailable === true,
            skillPrimedIdsByName,
          );
        },
        toolEndCallback,
        ...getSkillToolDeps(),
      };

      const handlers = {
        on_message_delta: aggregatorHandlers.on_message_delta,
        on_reasoning_delta: aggregatorHandlers.on_reasoning_delta,
        on_run_step: aggregatorHandlers.on_run_step,
        on_run_step_delta: aggregatorHandlers.on_run_step_delta,
        on_chat_model_end: {
          handle: (event, data, metadata) => {
            aggregatorHandlers.on_chat_model_end.handle(event, data);
            const usage = data?.output?.usage_metadata;
            if (usage) {
              const taggedUsage = markSummarizationUsage(usage, metadata);
              collectedUsage.push(taggedUsage);
            }
          },
        },
        on_tool_end: new ToolEndHandler(toolEndCallback, logger),
        on_run_step_completed: { handle: () => {} },
        on_chain_stream: { handle: () => {} },
        on_chain_end: { handle: () => {} },
        on_agent_update: { handle: () => {} },
        on_custom_event: { handle: () => {} },
        on_tool_execute: createToolExecuteHandler(toolExecuteOptions),
        on_agent_log: agentLogHandlerObj,
        ...(summarizationConfig?.enabled !== false
          ? buildSummarizationHandlers({ isStreaming: false, res })
          : {}),
      };

      const userId = req.user?.id ?? 'api-user';
      const userMCPAuthMap = mergedMCPAuthMap;

      const run = await createRun({
        agents: runAgents,
        messages: formattedMessages,
        indexTokenCountMap,
        initialSummary,
        runId: responseId,
        summarizationConfig,
        appConfig,
        signal: abortController.signal,
        customHandlers: handlers,
        requestBody: {
          messageId: responseId,
          conversationId,
        },
        user: { id: userId },
      });

      if (!run) {
        throw new Error('Failed to create agent run');
      }

      const config = {
        runName: 'AgentRun',
        configurable: {
          thread_id: conversationId,
          user_id: userId,
          user: createSafeUser(req.user),
          requestBody: {
            messageId: responseId,
            conversationId,
          },
          ...(userMCPAuthMap != null && { userMCPAuthMap }),
        },
        signal: abortController.signal,
        streamMode: 'values',
        version: 'v2',
      };

      await run.processStream({ messages: formattedMessages }, config, {
        callbacks: {
          [Callback.TOOL_ERROR]: (graph, error, toolId) => {
            logger.error(`[Responses API] Tool Error "${toolId}"`, error);
          },
        },
      });

      // Record token usage against balance
      const balanceConfig = getBalanceConfig(appConfig);
      const transactionsConfig = getTransactionsConfig(appConfig);
      recordCollectedUsage(
        {
          spendTokens: db.spendTokens,
          spendStructuredTokens: db.spendStructuredTokens,
          pricing: { getMultiplier: db.getMultiplier, getCacheMultiplier: db.getCacheMultiplier },
          bulkWriteOps: { insertMany: db.bulkInsertTransactions, updateBalance: db.updateBalance },
        },
        {
          user: userId,
          conversationId,
          collectedUsage,
          context: 'message',
          messageId: responseId,
          balance: balanceConfig,
          transactions: transactionsConfig,
          model: primaryConfig.model || agent.model_parameters?.model,
        },
      ).catch((err) => {
        logger.error('[Responses API] Error recording usage:', err);
      });

      if (artifactPromises.length > 0) {
        try {
          await Promise.all(artifactPromises);
        } catch (artifactError) {
          logger.warn('[Responses API] Error processing artifacts:', artifactError);
        }
      }

      const response = buildAggregatedResponse(context, aggregator);

      if (request.store === true) {
        try {
          await saveConversation(req, conversationId, agentId, agent);

          await saveInputMessages(req, conversationId, inputMessages, agentId);

          await saveResponseOutput(req, conversationId, responseId, response, agentId);

          logger.debug(
            `[Responses API] Stored response ${responseId} in conversation ${conversationId}`,
          );
        } catch (saveError) {
          logger.error('[Responses API] Error saving response:', saveError);
          // Don't fail the request if saving fails
        }
      }

      res.json(response);

      const duration = Date.now() - requestStartTime;
      logger.debug(
        `[Responses API] Request ${responseId} completed in ${duration}ms (non-streaming)`,
      );
    }
  } catch (error) {
    const errorMessage = error instanceof Error ? error.message : 'An error occurred';
    logger.error('[Responses API] Error:', error);

    // Check if we already started streaming (headers sent)
    if (res.headersSent) {
      // Headers already sent, write error event and close
      writeDone(res);
      res.end();
    } else {
      // Forward upstream provider status codes (e.g., Anthropic 400s) instead of masking as 500
      const statusCode =
        typeof error?.status === 'number' && error.status >= 400 && error.status < 600
          ? error.status
          : 500;
      const errorType = statusCode >= 400 && statusCode < 500 ? 'invalid_request' : 'server_error';
      sendResponsesErrorResponse(res, statusCode, errorMessage, errorType);
    }
  }
};
|
|
|
|
/**
 * List available agents as models - GET /v1/models (also works with /v1/responses/models)
 *
 * Returns a list of available agents the user has remote access to.
 *
 * @param {import('express').Request} req
 * @param {import('express').Response} res
 */
const listModels = async (req, res) => {
  try {
    const userId = req.user?.id;
    const userRole = req.user?.role;

    if (!userId) {
      return sendResponsesErrorResponse(res, 401, 'Authentication required', 'auth_error');
    }

    // Find agents the user has remote access to (VIEW permission on REMOTE_AGENT)
    const accessibleAgentIds = await findAccessibleResources({
      userId,
      role: userRole,
      resourceType: ResourceType.REMOTE_AGENT,
      requiredPermissions: PermissionBits.VIEW,
    });

    // Get the accessible agents
    let agents = [];
    if (accessibleAgentIds.length > 0) {
      agents = await db.getAgents({ _id: { $in: accessibleAgentIds } });
    }

    // Convert to models format
    const models = agents.map((agent) => ({
      id: agent.id,
      object: 'model',
      // Guard against a missing `createdAt` (legacy docs), which would
      // otherwise produce `created: NaN`; mirrors the `|| Date.now()`
      // fallback used by getResponse for conversation timestamps.
      created: Math.floor(new Date(agent.createdAt ?? Date.now()).getTime() / 1000),
      owned_by: agent.author ?? 'librechat',
      // Additional metadata
      name: agent.name,
      description: agent.description,
      provider: agent.provider,
    }));

    res.json({
      object: 'list',
      data: models,
    });
  } catch (error) {
    logger.error('[Responses API] Error listing models:', error);
    sendResponsesErrorResponse(
      res,
      500,
      error instanceof Error ? error.message : 'Failed to list models',
      'server_error',
    );
  }
};
|
|
|
|
/**
 * Get Response - GET /v1/responses/:id
 *
 * Retrieves a stored response by its ID.
 * The response ID maps to a conversationId in LibreChat's storage.
 *
 * @param {import('express').Request} req
 * @param {import('express').Response} res
 */
const getResponse = async (req, res) => {
  try {
    const responseId = req.params.id;
    const userId = req.user?.id;

    if (!responseId) {
      return sendResponsesErrorResponse(res, 400, 'Response ID is required');
    }

    // The responseId could be either the response ID or the conversation ID;
    // look up a conversation keyed by it.
    const conversation = await db.getConvo(userId, responseId);
    if (!conversation) {
      return sendResponsesErrorResponse(
        res,
        404,
        `Response not found: ${responseId}`,
        'not_found',
        'response_not_found',
      );
    }

    // Load the conversation's messages; an empty thread is treated as not found.
    const messages = await db.getMessages({ conversationId: responseId, user: userId });
    if (!messages || messages.length === 0) {
      return sendResponsesErrorResponse(
        res,
        404,
        `No messages found for response: ${responseId}`,
        'not_found',
        'response_not_found',
      );
    }

    // Convert messages to Open Responses output format
    const output = convertMessagesToOutputItems(messages);

    // Walk backwards to locate the most recent assistant message for usage info
    let lastAssistantMessage;
    for (let i = messages.length - 1; i >= 0; i -= 1) {
      if (!messages[i].isCreatedByUser) {
        lastAssistantMessage = messages[i];
        break;
      }
    }

    // Epoch seconds, falling back to "now" when the timestamp is absent.
    const toUnixSeconds = (value) => Math.floor(new Date(value || Date.now()).getTime() / 1000);

    // Only output tokens are tracked here; input token counts are not stored.
    const outputTokens = lastAssistantMessage?.tokenCount;
    const usage = outputTokens
      ? {
          input_tokens: 0,
          output_tokens: outputTokens,
          total_tokens: outputTokens,
        }
      : null;

    // Assemble the Open Responses "response" object.
    const response = {
      id: responseId,
      object: 'response',
      created_at: toUnixSeconds(conversation.createdAt),
      completed_at: toUnixSeconds(conversation.updatedAt),
      status: 'completed',
      incomplete_details: null,
      model: conversation.agentId || conversation.model || 'unknown',
      previous_response_id: null,
      instructions: null,
      output,
      error: null,
      tools: [],
      tool_choice: 'auto',
      truncation: 'disabled',
      parallel_tool_calls: true,
      text: { format: { type: 'text' } },
      temperature: 1,
      top_p: 1,
      presence_penalty: 0,
      frequency_penalty: 0,
      top_logprobs: null,
      reasoning: null,
      user: userId,
      usage,
      max_output_tokens: null,
      max_tool_calls: null,
      store: true,
      background: false,
      service_tier: 'default',
      metadata: {},
      safety_identifier: null,
      prompt_cache_key: null,
    };

    res.json(response);
  } catch (error) {
    logger.error('[Responses API] Error getting response:', error);
    sendResponsesErrorResponse(
      res,
      500,
      error instanceof Error ? error.message : 'Failed to get response',
      'server_error',
    );
  }
};
|
|
|
|
// Controller surface for the Open Responses API router:
// createResponse → POST /v1/responses, getResponse → GET /v1/responses/:id,
// listModels → GET /v1/models (and /v1/responses/models).
module.exports = {
  createResponse,
  getResponse,
  listModels,
};
|