LLM support

HyperAgent can run any LLM that extends the LangChain BaseChatModel class. You can see a list of supported models here.

All that is required is initializing the model with its required parameters and passing it to the llm parameter of the HyperAgent constructor.
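
In its simplest form, the pattern looks like this. This is a minimal sketch: the task string is a placeholder, and the full, runnable examples follow below.

import "dotenv/config";
import HyperAgent from "@hyperbrowser/agent";
import { ChatOpenAI } from "@langchain/openai";

// Any LangChain chat model that extends BaseChatModel can be used here
const llm = new ChatOpenAI({
  apiKey: process.env.OPENAI_API_KEY,
  model: "gpt-4o",
});

// Pass the model to HyperAgent through the llm parameter
const agent = new HyperAgent({ llm });

const result = await agent.executeTask("Summarize the top story on hackernews");
console.log(result.output);
await agent.closeAgent();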

With OpenAI GPT-4o

import "dotenv/config";
import HyperAgent from "@hyperbrowser/agent";

import chalk from "chalk";
import { ChatOpenAI } from "@langchain/openai";

const TASK =
  "Go to hackernews, and find if there's any SHOW HN post up there. If it is, then tell me the title of the post.";

async function runEval() {
  // Initialize the LangChain chat model; any BaseChatModel subclass works here
  const llm = new ChatOpenAI({
    apiKey: process.env.OPENAI_API_KEY,
    model: "gpt-4o",
  });

  // Pass the model to HyperAgent through the llm parameter
  const agent = new HyperAgent({
    llm: llm,
    debug: true,
  });

  console.log(`\n${chalk.green("Running agent with GPT-4o")}\n`);

  // Run the task, logging the raw agent output and each step via callbacks
  const result = await agent.executeTask(TASK, {
    debugOnAgentOutput: (agentOutput) => {
      console.log("\n" + chalk.cyan.bold("===== AGENT OUTPUT ====="));
      console.dir(agentOutput, { depth: null, colors: true });
      console.log(chalk.cyan.bold("===============") + "\n");
    },
    onStep: (step) => {
      console.log("\n" + chalk.cyan.bold(`===== STEP ${step.idx} =====`));
      console.dir(step, { depth: null, colors: true });
      console.log(chalk.cyan.bold("===============") + "\n");
    },
  });
  await agent.closeAgent();
  console.log(chalk.green.bold("\nResult:"));
  console.log(chalk.white(result.output));
  return result;
}

(async () => {
  await runEval();
})().catch((error) => {
  console.error(chalk.red("Error:"), error);
  process.exit(1);
});

With Anthropic Claude 3.7 Sonnet

import "dotenv/config";
import HyperAgent from "@hyperbrowser/agent";

import chalk from "chalk";
import { ChatAnthropic } from "@langchain/anthropic";

const TASK =
  "Go to hackernews, and find if there's any SHOW HN post up there. If it is, then tell me the title of the post.";

async function runEval() {
  // Initialize the LangChain chat model; any BaseChatModel subclass works here
  const llm = new ChatAnthropic({
    apiKey: process.env.ANTHROPIC_API_KEY,
    model: "claude-3-7-sonnet-latest",
  });

  // Pass the model to HyperAgent through the llm parameter
  const agent = new HyperAgent({
    llm: llm,
  });

  console.log(`\n${chalk.green("Running agent with Claude 3.7 Sonnet")}\n`);

  // Run the task, logging the raw agent output and each step via callbacks
  const result = await agent.executeTask(TASK, {
    debugOnAgentOutput: (agentOutput) => {
      console.log("\n" + chalk.cyan.bold("===== AGENT OUTPUT ====="));
      console.dir(agentOutput, { depth: null, colors: true });
      console.log(chalk.cyan.bold("===============") + "\n");
    },
    onStep: (step) => {
      console.log("\n" + chalk.cyan.bold(`===== STEP ${step.idx} =====`));
      console.dir(step, { depth: null, colors: true });
      console.log(chalk.cyan.bold("===============") + "\n");
    },
  });
  await agent.closeAgent();
  console.log(chalk.green.bold("\nResult:"));
  console.log(chalk.white(result.output));
  return result;
}

(async () => {
  await runEval();
})().catch((error) => {
  console.error(chalk.red("Error:"), error);
  process.exit(1);
});
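
With other providers

Because HyperAgent only requires a BaseChatModel, other LangChain chat model integrations should work the same way. The sketch below assumes the @langchain/google-genai package, its ChatGoogleGenerativeAI class, and a GOOGLE_API_KEY environment variable; it is illustrative rather than an officially documented example.

import "dotenv/config";
import HyperAgent from "@hyperbrowser/agent";

import { ChatGoogleGenerativeAI } from "@langchain/google-genai";

// Assumption: ChatGoogleGenerativeAI extends BaseChatModel, so HyperAgent
// accepts it through the same llm parameter as the examples above.
const llm = new ChatGoogleGenerativeAI({
  apiKey: process.env.GOOGLE_API_KEY,
  model: "gemini-1.5-flash",
});

const agent = new HyperAgent({ llm });

const result = await agent.executeTask(
  "Go to hackernews, and find if there's any SHOW HN post up there. If it is, then tell me the title of the post."
);
console.log(result.output);
await agent.closeAgent();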
