Did you know that most of the functionality in txtai can be run with configuration? That's right, txtai can dynamically load Embeddings, LLM, RAG and other pipelines with YAML configuration.
Check out this example that loads an Embeddings database via Docker with a couple of lines of YAML config. The example then runs a graph search via the API and plots the results with Sigma.js.
config/config.yml
# txtai API configuration: load a prebuilt embeddings index and serve it.
# Allow writes through the API
writable: True

# Pull the prebuilt Wikipedia index from the Hugging Face Hub
# (provider/container must be nested under cloud)
cloud:
  provider: huggingface-hub
  container: neuml/txtai-wikipedia-slim

# Empty mapping: load the embeddings index with its stored settings
embeddings:
# Start the txtai API in Docker, exposing it on port 8000 via uvicorn.
# NOTE: the config directory must be bind-mounted with an absolute path —
# a bare "config" source would be treated as an (empty) named volume and
# config.yml would not be visible inside the container.
docker run -it -p 8000:8000 -v "$(pwd)/config":/config -e CONFIG=/config/config.yml \
  --entrypoint uvicorn neuml/txtai-cpu --host 0.0.0.0 txtai.api:app
# Run a graph search against the local txtai API and save the node-link JSON
# where the visualization page can fetch it (html/data.json <- fetch('data.json'))
curl -s "http://localhost:8000/search?query=roman+empire&limit=10&graph=True" > html/data.json
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<title>Graph Network</title>
<!-- Sigma.js renderer + graphology graph model (Sigma 2.x requires a graphology instance) -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/sigma.js/2.4.0/sigma.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/graphology/0.25.4/graphology.umd.min.js"></script>
</head>
<body>
<!-- Render target for Sigma (fixed ">" that was missing before </div>) -->
<div id="container" style="width: 1250px; height: 600px"></div>
<script>
// Load the node-link graph exported by the txtai API and render it.
fetch('data.json')
  .then((response) => response.json())
  .then((data) => {
    // Build a graphology graph from the API's node-link format
    const graph = new graphology.Graph();
    for (const node of data.nodes) {
      // Label is "(id) " + first 20 chars of the node text; random initial
      // layout positions — Sigma renders whatever coordinates it is given
      graph.addNode(node.id, {
        label: `(${node.id}) ${node.text.substring(0, 20)}...`,
        x: Math.random(),
        y: Math.random(),
        size: 15,
        color: "#03a9f4",
      });
    }
    for (const edge of data.links) {
      graph.addEdge(edge.source, edge.target, { size: edge.weight, color: "gray" });
    }
    // Instantiate sigma.js and render the graph into the container div
    new Sigma(graph, document.getElementById("container"));
  })
  // Surface load/parse failures instead of silently swallowing them
  .catch((err) => console.error("Failed to load graph data", err));
</script>
</body>
</html>
Learn more here: https://neuml.github.io/txtai/api/