// Base endpoint of the GraphorLM prebuilt-RAG retrieval API (POSTed to by retrieveChunks).
const API_URL = "https://sources.graphorlm.com/prebuilt-rag";
// NOTE(review): placeholder credential — never commit a real token; load it from an
// environment variable or secret store before deploying.
const API_TOKEN = "YOUR_API_TOKEN";
/**
 * Retrieve relevant document chunks from the prebuilt-RAG endpoint.
 *
 * @param {string} query - Natural-language search query.
 * @param {string[]|null} [fileNames=null] - Optional list of file names to
 *   restrict retrieval to; omitted from the payload when null or empty.
 * @returns {Promise<object>} Parsed JSON response (expected to contain a
 *   `chunks` array — see buildContext).
 * @throws {Error} When the API responds with a non-2xx status.
 */
async function retrieveChunks(query, fileNames = null) {
  const payload = { query };
  // Only send the filter when the caller actually supplied file names.
  if (fileNames?.length) payload.file_names = fileNames;
  const response = await fetch(API_URL, {
    method: "POST",
    headers: {
      "Authorization": `Bearer ${API_TOKEN}`,
      "Content-Type": "application/json"
    },
    body: JSON.stringify(payload)
  });
  // fetch() only rejects on network failure — HTTP errors (401, 422, 500, …)
  // resolve normally, so without this check an error body would be parsed and
  // passed downstream as if it were a successful retrieval.
  if (!response.ok) {
    throw new Error(`Chunk retrieval failed: ${response.status} ${response.statusText}`);
  }
  return response.json();
}
/**
 * Format retrieved chunks into a single context string for the LLM prompt.
 * Each chunk is rendered as a "[Source: file, Page n]" header followed by its
 * text; chunks are separated by blank lines.
 *
 * @param {{chunks?: Array<{file_name: string, page_number: number, text: string}>}} chunks
 *   Retrieval response object. A missing or empty `chunks` array yields "".
 * @returns {string} Concatenated, source-attributed context.
 */
function buildContext(chunks) {
  // Guard against error responses or empty retrievals: the original threw a
  // TypeError when `chunks.chunks` was absent.
  return (chunks?.chunks ?? []).map(chunk =>
    `[Source: ${chunk.file_name}, Page ${chunk.page_number}]\n${chunk.text}`
  ).join("\n\n");
}
/**
 * Generate an answer with an LLM, grounded in the retrieved context.
 * Uses the OpenAI Chat Completions API; swap in your preferred provider.
 *
 * @param {string} question - The user's question.
 * @param {string} context - Source-attributed context from buildContext().
 * @returns {Promise<string>} The model's answer text.
 * @throws {Error} When the API returns a non-2xx status or a response with no
 *   message content.
 */
async function generateAnswer(question, context) {
  const response = await fetch("https://api.openai.com/v1/chat/completions", {
    method: "POST",
    headers: {
      // NOTE(review): OPENAI_API_KEY is not defined in this file — confirm it
      // is declared (e.g. from process.env) before this runs.
      "Authorization": `Bearer ${OPENAI_API_KEY}`,
      "Content-Type": "application/json"
    },
    body: JSON.stringify({
      model: "gpt-4",
      messages: [
        { role: "system", content: "Answer questions based on the provided context." },
        { role: "user", content: `Context:\n${context}\n\nQuestion: ${question}` }
      ]
    })
  });
  // HTTP errors resolve normally from fetch(); fail loudly instead of reading
  // `choices` off an error body (which previously threw an opaque TypeError).
  if (!response.ok) {
    throw new Error(`LLM request failed: ${response.status} ${response.statusText}`);
  }
  const data = await response.json();
  const answer = data.choices?.[0]?.message?.content;
  if (answer == null) {
    throw new Error("LLM response contained no message content");
  }
  return answer;
}
/**
 * Full RAG pipeline: retrieve chunks, assemble the prompt context, and ask
 * the LLM. Returns both the generated answer and the chunks it was based on.
 *
 * @param {string} question - The user's question.
 * @param {string[]|null} [fileNames=null] - Optional file-name filter passed
 *   through to retrieval.
 * @returns {Promise<{answer: string, sources: Array<object>}>}
 */
async function askQuestion(question, fileNames = null) {
  const retrieval = await retrieveChunks(question, fileNames);
  const answer = await generateAnswer(question, buildContext(retrieval));
  return { answer, sources: retrieval.chunks };
}
// Usage — top-level await (ES module). The original awaited with no rejection
// handling, so any retrieval/LLM failure crashed the module with an unhandled
// rejection; report it explicitly instead.
try {
  const result = await askQuestion("What are the payment terms?");
  console.log(result.answer);
  console.log("Sources:", result.sources.map(s => s.file_name));
} catch (err) {
  console.error("RAG pipeline failed:", err);
}