Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

context_length_exceeded | This model's maximum context length is 16385 tokens. However, your messages resulted in 59678 tokens. Please reduce the length of the messages | invalid_request_error #6988

Closed
AshokJangidBDApp opened this issue Mar 6, 2024 · 4 comments
Labels

Comments

@AshokJangidBDApp
Copy link

AshokJangidBDApp commented Mar 6, 2024

const dotenv = require('dotenv');
const { DataSource } = require("typeorm");
const { SqlDatabase } = require("langchain/sql_db");
const { ChatOpenAI } = require("@langchain/openai");
const { RunnablePassthrough, RunnableSequence } = require("@langchain/core/runnables");
const { PromptTemplate } = require("@langchain/core/prompts");
const { StringOutputParser } = require("@langchain/core/output_parsers");
dotenv.config();
/**
 * Demo entry point: wires a TypeORM MySQL connection into a LangChain
 * text-to-SQL chain, then asks a question in short, stop-word-stripped
 * segments to keep the request under the model's context limit.
 * Side effects: opens a DB connection, calls the OpenAI API, logs to console.
 */
async function main() {
  try {
    console.log("Ai chat testing...");
    // Set up database connection.
    // NOTE: process.env values are strings, so the port must be converted
    // to a number. TypeORM's mysql options have no `dialect` key (that is
    // a Sequelize option), so it is not passed here.
    const datasource = new DataSource({
      type: "mysql",
      host: process.env.DB_HOST,
      port: Number(process.env.DB_PORT),
      username: process.env.DB_USERNAME,
      password: process.env.DB_PASSWORD,
      database: process.env.DB_DATABASE,
    });
    const db = await SqlDatabase.fromDataSourceParams({ appDataSource: datasource });
    // Prepare OpenAI model.
    // SECURITY: never hard-code API keys in source — read the key from the
    // environment (dotenv.config() has already loaded .env above).
    const model = new ChatOpenAI({
      openAIApiKey: process.env.OPENAI_API_KEY,
    });
    // Prompt that turns (schema, question) into a raw SQL query.
    const prompt = PromptTemplate.fromTemplate(`Based on the table schema below, write a SQL query that would answer the user's question:
  {schema}
  Question: {question}
  SQL Query:`);
    // Prompt that turns (schema, question, query, response) into prose.
    const finalResponsePrompt = PromptTemplate.fromTemplate(`Based on the table schema below, question, sql query, and sql response, write a natural language response:
  {schema}
  Question: {question}
  SQL Query: {query}
  SQL Response: {response}`);
    // Chain 1: question -> SQL text. The stop sequence keeps the model from
    // fabricating a "SQLResult:" section in its output.
    const sqlQueryGeneratorChain = RunnableSequence.from([
      RunnablePassthrough.assign({
        schema: async () => db.getTableInfo(),
      }),
      prompt,
      model.bind({ stop: ["\nSQLResult:"] }),
      new StringOutputParser(),
    ]);
    // Chain 2: question -> SQL -> execute against the DB -> natural-language
    // answer from the final prompt.
    const fullChain = RunnableSequence.from([
      RunnablePassthrough.assign({
        query: sqlQueryGeneratorChain,
      }),
      {
        schema: async () => db.getTableInfo(),
        question: (input) => input.question,
        query: (input) => input.query,
        response: (input) => db.run(input.query),
      },
      finalResponsePrompt,
      model,
    ]);
    console.log("fullChain =====================>", fullChain);

    /**
     * Split a message into fixed-size chunks; the last chunk may be shorter.
     * An empty message yields an empty array.
     * @param {string} message
     * @param {number} maxLength - maximum characters per segment (> 0)
     * @returns {string[]}
     */
    function splitMessage(message, maxLength) {
      const segments = [];
      for (let i = 0; i < message.length; i += maxLength) {
        segments.push(message.slice(i, i + maxLength));
      }
      return segments;
    }

    /**
     * Reduce token count by dropping common English stop words
     * (case-insensitive, whole-word match on space-separated tokens).
     * @param {string} message
     * @returns {string} the message with stop words removed
     */
    function optimizeMessage(message) {
      // Set gives O(1) membership checks instead of O(n) Array.includes.
      const stopWords = new Set(["and", "the", "of", "to", "in", "a", "is", "that", "it", "on", "for", "with", "as", "at", "by", "this", "from", "are", "you", "or", "we", "an", "be", "your", "not", "have", "but", "which", "they", "will", "can"]);
      const words = message.split(" ");
      const optimizedWords = words.filter((word) => !stopWords.has(word.toLowerCase()));
      return optimizedWords.join(" ");
    }

    const question = "lowest service plan?";
    const segments = splitMessage(question, 50); // Adjust maximum length as needed
    const optimizedSegments = segments.map((segment) => optimizeMessage(segment));
    const finalResponses = [];
    // Segments are sent sequentially: each invocation runs a query against
    // the shared DB connection.
    for (const segment of optimizedSegments) {
      const finalResponse = await fullChain.invoke({ question: segment });
      finalResponses.push(finalResponse);
    }
    // Output final responses
    console.log("Final responses:", finalResponses);
  } catch (error) {
    console.error('Error:', error);
  }
}
// Call the main function
main();
  
  
  
![image](https://github.com/Significant-Gravitas/AutoGPT/assets/133368241/95c6eaca-f79c-4a32-bf04-2cf4dbc8c761)
@AshokJangidBDApp
Copy link
Author

image

@AshokJangidBDApp
Copy link
Author

AshokJangidBDApp commented Mar 6, 2024

import dotenv from 'dotenv';
dotenv.config();

import { DataSource } from "typeorm";
import { SqlDatabase } from "langchain/sql_db";
import { ChatOpenAI } from "@langchain/openai";
import {
  RunnablePassthrough,
  RunnableSequence,
} from "@langchain/core/runnables";
import { PromptTemplate } from "@langchain/core/prompts";
import { StringOutputParser } from "@langchain/core/output_parsers";

// MySQL connection via TypeORM.
// NOTE: process.env values are strings, so the port must be converted to a
// number. TypeORM's mysql options have no `dialect` key (that is a Sequelize
// option), so it is not passed here.
const datasource = new DataSource({
  type: "mysql",
  host: process.env.DB_HOST,
  port: Number(process.env.DB_PORT),
  username: process.env.DB_USERNAME,
  password: process.env.DB_PASSWORD,
  database: process.env.DB_NAME,
});

// Wrap the data source so LangChain can introspect the schema and run SQL.
const db = await SqlDatabase.fromDataSourceParams({
  appDataSource: datasource,
});

// Prompt that turns (schema, question) into a raw SQL query.
const prompt = PromptTemplate.fromTemplate(`Based on the table schema below, write a SQL query that would answer the user's question:
{schema}

Question: {question}
SQL Query:`);

// SECURITY: never hard-code API keys in source — read the key from the
// environment (dotenv.config() has already loaded .env at the top).
const model = new ChatOpenAI({
  openAIApiKey: process.env.OPENAI_API_KEY,
});

// Chain: question -> SQL text. The stop sequence keeps the model from
// fabricating a "SQLResult:" section in its output.
const sqlQueryGeneratorChain = RunnableSequence.from([
  RunnablePassthrough.assign({
    schema: async () => db.getTableInfo(),
  }),
  prompt,
  model.bind({ stop: ["\nSQLResult:"] }),
  new StringOutputParser(),
]);

// Prompt that turns (schema, question, query, response) into prose.
const finalResponsePrompt = PromptTemplate.fromTemplate(`Based on the table schema below, question, sql query, and sql response, write a natural language response:
{schema}

Question: {question}
SQL Query: {query}
SQL Response: {response}`);

// Full pipeline: question -> SQL -> execute against the DB -> natural-language
// answer from the final prompt.
const fullChain = RunnableSequence.from([
  RunnablePassthrough.assign({
    query: sqlQueryGeneratorChain,
  }),
  {
    schema: async () => db.getTableInfo(),
    question: (input) => input.question,
    query: (input) => input.query,
    response: (input) => db.run(input.query),
  },
  finalResponsePrompt,
  model,
]);

// `question` must be a plain string — wrapping it in an array gets it
// stringified into the prompt rather than interpolated as the question text.
const firstAnswer = await fullChain.invoke({
  question: "How many employee present current?",
});
const secondAnswer = await fullChain.invoke({
  question: "can you give names of present employees?",
});

console.log(secondAnswer, firstAnswer);




**It is working fine.**

Copy link

This issue has automatically been marked as stale because it has not had any activity in the last 50 days. You can unstale it by commenting or removing the label. Otherwise, this issue will be closed in 10 days.

@github-actions github-actions bot added the Stale label Apr 26, 2024
Copy link

github-actions bot commented May 6, 2024

This issue was closed automatically because it has been stale for 10 days with no activity.

@github-actions github-actions bot closed this as not planned Won't fix, can't repro, duplicate, stale May 6, 2024
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
Projects
None yet
Development

No branches or pull requests

1 participant