Lesson 15: The Manual Tool-Use Loop
This lesson deconstructs the high-level .run() agent to reveal the fundamental, multi-step process of tool use, giving you maximum control over the interaction.
Code: lesson_15_manual_tool_loop.mjs
Instead of .run(), this script uses .stream() twice. The first call gets the model's request to use a tool. We then execute the tool, add the results to the message history, and make a second .stream() call to get the final, natural-language summary.
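In other words, the history grows in a fixed pattern: the user question goes in first; the first .stream() call returns a tool_calls event instead of an answer; we append an assistant tool-call message and its matching tool result; and the second .stream() call over that longer history yields the final text answer.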
// lesson_15_manual_tool_loop.mjs
// Merci SDK Tutorial: Lesson 15 - The Manual Tool-Use Loop
// --- IMPORTS ---
// We need more helpers to manually construct the messages for the tool loop.
import {
    MerciClient,
    createUserMessage,
    executeTools,
    createAssistantToolCallMessage,
    createToolResultMessage,
    createAssistantTextMessage
} from '../lib/merci.2.14.0.mjs';
import { token } from '../secret/token.mjs';
const MODEL = 'google-chat-gemini-flash-2.5';
// --- TOOL DEFINITION ---
const weatherTool = {
    name: 'get_current_weather',
    parameters: {
        schema: { type: 'object', properties: { city: { type: 'string' } }, required: ['city'] }
    },
    execute: async ({ city }) => {
        console.log(`\n[TOOL EXECUTING] Getting weather for ${city}...`);
        const temperatures = { 'paris': { temperature: '14', unit: 'celsius' } };
        return temperatures[city.toLowerCase()] || { error: 'City not found' };
    },
};
async function main() {
    console.log(`--- Merci SDK Tutorial: Lesson 15 - The Manual Tool-Use Loop (Model: ${MODEL}) ---`);
    console.log('This lesson deconstructs the `.run()` agent to show the fundamental steps.');
    try {
        // --- STEP 1: INITIALIZE CLIENT AND SESSION ---
        console.log('\n[STEP 1] Initializing client and configuring session with tools...');
        const client = new MerciClient({ token });
        const chatSession = client.chat.session(MODEL).withTools([weatherTool]);
        // --- STEP 2: PREPARE INITIAL PROMPT AND MESSAGE HISTORY ---
        console.log('[STEP 2] Preparing initial user prompt...');
        const userPrompt = "What's the weather like in Paris?";
        const messages = [createUserMessage(userPrompt)];
        console.log(`👤 User > ${userPrompt}`);
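        // From here on, `messages` is the single running history; every later step appends to it by hand.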
        // --- STEP 3: FIRST API CALL - GET THE TOOL CALL REQUEST ---
        console.log('\n[STEP 3] Making the first API call to get the model\'s tool request...');
        let toolCalls = [];
        let assistantFirstResponse = ''; // The model might say something before calling a tool.
        process.stdout.write('🤖 Assistant (thinking) > ');
        for await (const event of chatSession.stream(messages)) {
            if (event.type === 'text') {
                process.stdout.write(event.content);
                assistantFirstResponse += event.content;
            } else if (event.type === 'tool_calls') {
                console.log(`\n[INFO] Intercepted a request to call ${event.calls.length} tool(s).`);
                toolCalls = event.calls;
            }
        }
        process.stdout.write('\n');
        // Add the model's initial text (if any) to history.
        if (assistantFirstResponse) {
            messages.push(createAssistantTextMessage(assistantFirstResponse));
        }
        // --- STEP 4: EXECUTE THE TOOL LOCALLY ---
        console.log('\n[STEP 4] Executing the requested tool locally...');
        const toolResults = await executeTools(toolCalls, [weatherTool]);
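        // executeTools returns one result object per requested call, each carrying either a
        // successful `result` or an `error` (checked via `result.success` in Step 5 below).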
        console.log('[INFO] Tool execution finished.');
        // --- STEP 5: UPDATE MESSAGE HISTORY WITH TOOL RESULTS ---
        console.log('[STEP 5] Updating message history with the tool call and its result...');
        toolResults.forEach((result, index) => {
            const call = toolCalls[index];
            const resultValue = result.success ? result.result : { error: result.error };
            messages.push(createAssistantToolCallMessage(call.id, call.name, call.arguments));
            messages.push(createToolResultMessage(call.id, call.name, JSON.stringify(resultValue)));
        });
        console.log('[INFO] History updated. Ready for the final API call.');
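        // Note the ordering: each assistant tool-call message is pushed immediately before its
        // matching tool-result message, and the pair is tied together by the same call.id.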
        // --- STEP 6: SECOND API CALL - GET THE FINAL NATURAL LANGUAGE RESPONSE ---
        console.log('\n[STEP 6] Making the second API call to get the final summary...');
        let finalResponse = '';
        process.stdout.write('🤖 Assistant (summarizing) > ');
        for await (const event of chatSession.stream(messages)) {
            if (event.type === 'text') {
                process.stdout.write(event.content);
                finalResponse += event.content;
            }
        }
        process.stdout.write('\n');
        console.log('\n[INFO] Stream finished. Response fully received.');
        // --- FINAL RESULT ---
        console.log('\n\n--- FINAL RESULT ---');
        console.log(`👤 User > ${userPrompt}`);
        console.log(`🤖 Assistant > ${finalResponse}`);
        console.log('--------------------');
        console.log('Notice the two-step process: first the tool call, then the final answer.');
    } catch (error) {
        console.error('\n\n[FATAL ERROR] An error occurred during the operation.');
        console.error(' Message:', error.message);
        if (error.status) { console.error(' API Status:', error.status); }
        if (error.details) { console.error(' Details:', JSON.stringify(error.details, null, 2)); }
        if (error.stack) { console.error(' Stack:', error.stack); }
        console.error('\n Possible causes: Invalid token, network issues, or an API service problem.');
        process.exit(1);
    }
}
main().catch(console.error);
Expected Output
The manual loop makes the agent's process explicit. The output shows the initial user prompt, the log from the local tool execution, and finally the model's summarized answer based on the tool's result.
👤 User > What's the weather like in Paris?
🤖 Assistant (thinking) >
[INFO] Intercepted a request to call 1 tool(s).
[TOOL EXECUTING] Getting weather for Paris...
[INFO] Tool execution finished.
[INFO] History updated. Ready for the final API call.
🤖 Assistant (summarizing) > The current weather in Paris is 14 degrees Celsius.
--- FINAL RESULT ---
👤 User > What's the weather like in Paris?
🤖 Assistant > The current weather in Paris is 14 degrees Celsius.
--------------------
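The single round above is all this lesson needs, since the model asks for exactly one tool call. If you want the same manual control when the model may request tools several times in a row, the pattern generalizes to a loop: keep streaming, executing any requested tools, and appending their results until a pass produces no tool calls. The sketch below shows one way to do that with a hypothetical runManualLoop helper; it reuses only the session, tool definitions, and message helpers from the listing above, and the maxRounds cap is an arbitrary safety limit, not something the SDK requires.

// Sketch: generalizing the manual two-call flow to multiple tool rounds.
// Assumes the same imports as the listing above (executeTools, createAssistantTextMessage,
// createAssistantToolCallMessage, createToolResultMessage) and a session built with .withTools().
async function runManualLoop(chatSession, messages, tools, maxRounds = 5) {
    for (let round = 0; round < maxRounds; round++) {
        let text = '';
        let toolCalls = [];
        for await (const event of chatSession.stream(messages)) {
            if (event.type === 'text') text += event.content;
            else if (event.type === 'tool_calls') toolCalls = event.calls;
        }
        if (text) messages.push(createAssistantTextMessage(text));
        if (toolCalls.length === 0) return text; // No further tool requests: this is the final answer.
        const results = await executeTools(toolCalls, tools);
        results.forEach((result, index) => {
            const call = toolCalls[index];
            const value = result.success ? result.result : { error: result.error };
            messages.push(createAssistantToolCallMessage(call.id, call.name, call.arguments));
            messages.push(createToolResultMessage(call.id, call.name, JSON.stringify(value)));
        });
    }
    throw new Error(`No final answer after ${maxRounds} tool rounds.`);
}

Calling runManualLoop(chatSession, [createUserMessage(userPrompt)], [weatherTool]) would reproduce this lesson's behaviour in a single call, while still leaving every intermediate message under your control.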