Created
October 9, 2024 10:29
-
-
Save tanpinsiang/34e31369b3024438267711d70ddf8961 to your computer and use it in GitHub Desktop.
This file has been truncated, but you can view the full file.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
{ | |
"openapi": "3.1.0", | |
"info": { | |
"title": "FastAPI", | |
"license": { | |
"name": "Apache 2.0", | |
"url": "https://www.apache.org/licenses/LICENSE-2.0.html" | |
}, | |
"version": "0.1.0", | |
"x-logo": { | |
"url": "https://www.jamaibase.com/favicon.svg" | |
} | |
}, | |
"servers": [ | |
{ | |
"url": "https://api.jamaibase.com" | |
} | |
], | |
"paths": { | |
"/api/v1/models": { | |
"get": { | |
"tags": [ | |
"Large Language Model" | |
], | |
"summary": "List the info of models available.", | |
"description": "List the info of models available with the specified name and capabilities.", | |
"operationId": "get_model_info", | |
"parameters": [ | |
{ | |
"name": "model", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "ID of the requested model.", | |
"examples": [ | |
"openai/gpt-4o-mini" | |
], | |
"default": "", | |
"title": "Model" | |
}, | |
"description": "ID of the requested model." | |
}, | |
{ | |
"name": "capabilities", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"anyOf": [ | |
{ | |
"type": "array", | |
"items": { | |
"type": "string", | |
"enum": [ | |
"completion", | |
"chat", | |
"image", | |
"embed", | |
"rerank" | |
], | |
"title": "ModelCapability" | |
} | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Filter the model info by model's capabilities. Leave it blank to disable filter.", | |
"examples": [ | |
[ | |
"chat" | |
] | |
], | |
"title": "Capabilities" | |
}, | |
"description": "Filter the model info by model's capabilities. Leave it blank to disable filter." | |
}, | |
{ | |
"name": "openai-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "OpenAI API key.", | |
"default": "", | |
"title": "Openai-Api-Key" | |
}, | |
"description": "OpenAI API key." | |
}, | |
{ | |
"name": "anthropic-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Anthropic API key.", | |
"default": "", | |
"title": "Anthropic-Api-Key" | |
}, | |
"description": "Anthropic API key." | |
}, | |
{ | |
"name": "gemini-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Google Gemini API key.", | |
"default": "", | |
"title": "Gemini-Api-Key" | |
}, | |
"description": "Google Gemini API key." | |
}, | |
{ | |
"name": "cohere-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Cohere API key.", | |
"default": "", | |
"title": "Cohere-Api-Key" | |
}, | |
"description": "Cohere API key." | |
}, | |
{ | |
"name": "groq-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Groq API key.", | |
"default": "", | |
"title": "Groq-Api-Key" | |
}, | |
"description": "Groq API key." | |
}, | |
{ | |
"name": "together-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Together AI API key.", | |
"default": "", | |
"title": "Together-Api-Key" | |
}, | |
"description": "Together AI API key." | |
}, | |
{ | |
"name": "jina-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Jina API key.", | |
"default": "", | |
"title": "Jina-Api-Key" | |
}, | |
"description": "Jina API key." | |
}, | |
{ | |
"name": "voyage-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Voyage API key.", | |
"default": "", | |
"title": "Voyage-Api-Key" | |
}, | |
"description": "Voyage API key." | |
} | |
], | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"object": { | |
"type": "string", | |
"title": "Object", | |
"description": "Type of API response object.", | |
"default": "chat.model_info", | |
"examples": [ | |
"chat.model_info" | |
] | |
}, | |
"data": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Unique identifier in the form of \"{provider}/{model_id}\". Users will specify this to select a model.", | |
"examples": [ | |
"openai/gpt-4o-mini" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"title": "Object", | |
"description": "Type of API response object.", | |
"default": "model", | |
"examples": [ | |
"model" | |
] | |
}, | |
"name": { | |
"type": "string", | |
"title": "Name", | |
"description": "Name of model.", | |
"examples": [ | |
"OpenAI GPT-4o Mini" | |
] | |
}, | |
"context_length": { | |
"type": "integer", | |
"title": "Context Length", | |
"description": "Context length of model.", | |
"examples": [ | |
16384 | |
] | |
}, | |
"languages": { | |
"items": { | |
"type": "string" | |
}, | |
"type": "array", | |
"title": "Languages", | |
"description": "List of languages which the model is well-versed in.", | |
"examples": [ | |
[ | |
"en" | |
] | |
] | |
}, | |
"owned_by": { | |
"type": "string", | |
"title": "Owned By", | |
"description": "The organization that owns the model. Defaults to the provider in model ID.", | |
"default": "", | |
"examples": [ | |
"openai" | |
] | |
}, | |
"capabilities": { | |
"items": { | |
"type": "string", | |
"enum": [ | |
"completion", | |
"chat", | |
"image", | |
"embed", | |
"rerank" | |
] | |
}, | |
"type": "array", | |
"title": "Capabilities", | |
"description": "List of capabilities of model.", | |
"examples": [ | |
[ | |
"chat" | |
] | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"name", | |
"context_length", | |
"languages", | |
"capabilities" | |
], | |
"title": "ModelInfo" | |
}, | |
"type": "array", | |
"title": "Data", | |
"description": "List of model information." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"data" | |
], | |
"title": "ModelInfoResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/model_names": { | |
"get": { | |
"tags": [ | |
"Large Language Model" | |
], | |
"summary": "List the ID of models available.", | |
"description": "List the ID of models available with the specified capabilities with an optional preferred model. If the preferred model is not available, then return the first available model.", | |
"operationId": "get_model_names", | |
"parameters": [ | |
{ | |
"name": "prefer", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "ID of the preferred model.", | |
"examples": [ | |
"openai/gpt-4o-mini" | |
], | |
"default": "", | |
"title": "Prefer" | |
}, | |
"description": "ID of the preferred model." | |
}, | |
{ | |
"name": "capabilities", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"anyOf": [ | |
{ | |
"type": "array", | |
"items": { | |
"type": "string", | |
"enum": [ | |
"completion", | |
"chat", | |
"image", | |
"embed", | |
"rerank" | |
], | |
"title": "ModelCapability" | |
} | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Filter the model info by model's capabilities. Leave it blank to disable filter.", | |
"examples": [ | |
[ | |
"chat" | |
] | |
], | |
"title": "Capabilities" | |
}, | |
"description": "Filter the model info by model's capabilities. Leave it blank to disable filter." | |
}, | |
{ | |
"name": "openai-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "OpenAI API key.", | |
"default": "", | |
"title": "Openai-Api-Key" | |
}, | |
"description": "OpenAI API key." | |
}, | |
{ | |
"name": "anthropic-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Anthropic API key.", | |
"default": "", | |
"title": "Anthropic-Api-Key" | |
}, | |
"description": "Anthropic API key." | |
}, | |
{ | |
"name": "gemini-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Google Gemini API key.", | |
"default": "", | |
"title": "Gemini-Api-Key" | |
}, | |
"description": "Google Gemini API key." | |
}, | |
{ | |
"name": "cohere-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Cohere API key.", | |
"default": "", | |
"title": "Cohere-Api-Key" | |
}, | |
"description": "Cohere API key." | |
}, | |
{ | |
"name": "groq-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Groq API key.", | |
"default": "", | |
"title": "Groq-Api-Key" | |
}, | |
"description": "Groq API key." | |
}, | |
{ | |
"name": "together-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Together AI API key.", | |
"default": "", | |
"title": "Together-Api-Key" | |
}, | |
"description": "Together AI API key." | |
}, | |
{ | |
"name": "jina-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Jina API key.", | |
"default": "", | |
"title": "Jina-Api-Key" | |
}, | |
"description": "Jina API key." | |
}, | |
{ | |
"name": "voyage-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Voyage API key.", | |
"default": "", | |
"title": "Voyage-Api-Key" | |
}, | |
"description": "Voyage API key." | |
} | |
], | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"type": "array", | |
"items": { | |
"type": "string" | |
}, | |
"title": "Response Get Model Names" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/chat/completions": { | |
"post": { | |
"tags": [ | |
"Large Language Model" | |
], | |
"summary": "Generate Completions", | |
"description": "Given a list of messages comprising a conversation, the model will return a response.", | |
"operationId": "generate_completions", | |
"parameters": [ | |
{ | |
"name": "openai-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "OpenAI API key.", | |
"default": "", | |
"title": "Openai-Api-Key" | |
}, | |
"description": "OpenAI API key." | |
}, | |
{ | |
"name": "anthropic-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Anthropic API key.", | |
"default": "", | |
"title": "Anthropic-Api-Key" | |
}, | |
"description": "Anthropic API key." | |
}, | |
{ | |
"name": "gemini-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Google Gemini API key.", | |
"default": "", | |
"title": "Gemini-Api-Key" | |
}, | |
"description": "Google Gemini API key." | |
}, | |
{ | |
"name": "cohere-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Cohere API key.", | |
"default": "", | |
"title": "Cohere-Api-Key" | |
}, | |
"description": "Cohere API key." | |
}, | |
{ | |
"name": "groq-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Groq API key.", | |
"default": "", | |
"title": "Groq-Api-Key" | |
}, | |
"description": "Groq API key." | |
}, | |
{ | |
"name": "together-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Together AI API key.", | |
"default": "", | |
"title": "Together-Api-Key" | |
}, | |
"description": "Together AI API key." | |
}, | |
{ | |
"name": "jina-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Jina API key.", | |
"default": "", | |
"title": "Jina-Api-Key" | |
}, | |
"description": "Jina API key." | |
}, | |
{ | |
"name": "voyage-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Voyage API key.", | |
"default": "", | |
"title": "Voyage-Api-Key" | |
}, | |
"description": "Voyage API key." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatRequest" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": {} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/embeddings": { | |
"post": { | |
"tags": [ | |
"Large Language Model" | |
], | |
"summary": "Generate Embeddings", | |
"description": "Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms. Note that the vectors are NOT normalized.", | |
"operationId": "generate_embeddings", | |
"parameters": [ | |
{ | |
"name": "openai-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "OpenAI API key.", | |
"default": "", | |
"title": "Openai-Api-Key" | |
}, | |
"description": "OpenAI API key." | |
}, | |
{ | |
"name": "anthropic-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Anthropic API key.", | |
"default": "", | |
"title": "Anthropic-Api-Key" | |
}, | |
"description": "Anthropic API key." | |
}, | |
{ | |
"name": "gemini-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Google Gemini API key.", | |
"default": "", | |
"title": "Gemini-Api-Key" | |
}, | |
"description": "Google Gemini API key." | |
}, | |
{ | |
"name": "cohere-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Cohere API key.", | |
"default": "", | |
"title": "Cohere-Api-Key" | |
}, | |
"description": "Cohere API key." | |
}, | |
{ | |
"name": "groq-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Groq API key.", | |
"default": "", | |
"title": "Groq-Api-Key" | |
}, | |
"description": "Groq API key." | |
}, | |
{ | |
"name": "together-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Together AI API key.", | |
"default": "", | |
"title": "Together-Api-Key" | |
}, | |
"description": "Together AI API key." | |
}, | |
{ | |
"name": "jina-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Jina API key.", | |
"default": "", | |
"title": "Jina-Api-Key" | |
}, | |
"description": "Jina API key." | |
}, | |
{ | |
"name": "voyage-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Voyage API key.", | |
"default": "", | |
"title": "Voyage-Api-Key" | |
}, | |
"description": "Voyage API key." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"input": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
} | |
], | |
"title": "Input", | |
"description": "Input text to embed, encoded as a string or array of strings (to embed multiple inputs in a single request). The input must not exceed the max input tokens for the model, and cannot contain empty string.", | |
"examples": [ | |
"What is a llama?", | |
[ | |
"What is a llama?", | |
"What is an alpaca?" | |
] | |
] | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "The ID of the model to use. You can use the List models API to see all of your available models.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"type": { | |
"type": "string", | |
"enum": [ | |
"query", | |
"document" | |
], | |
"title": "Type", | |
"description": "Whether the input text is a \"query\" (used to retrieve) or a \"document\" (to be retrieved).", | |
"default": "document", | |
"examples": [ | |
"query", | |
"document" | |
] | |
}, | |
"encoding_format": { | |
"type": "string", | |
"enum": [ | |
"float", | |
"base64" | |
], | |
"title": "Encoding Format", | |
"description": "_Optional_. The format to return the embeddings in. Can be either \"float\" or \"base64\". `base64` string should be decoded as a `float32` array. Example: `np.frombuffer(base64.b64decode(response), dtype=np.float32)`", | |
"default": "float", | |
"examples": [ | |
"float", | |
"base64" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"input", | |
"model" | |
], | |
"title": "EmbeddingRequest" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"object": { | |
"type": "string", | |
"title": "Object", | |
"description": "The object type, which is always \"list\".", | |
"default": "list", | |
"examples": [ | |
"list" | |
] | |
}, | |
"data": { | |
"items": { | |
"properties": { | |
"object": { | |
"type": "string", | |
"title": "Object", | |
"description": "The object type, which is always \"embedding\".", | |
"default": "embedding", | |
"examples": [ | |
"embedding" | |
] | |
}, | |
"embedding": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "number" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "string" | |
} | |
], | |
"title": "Embedding", | |
"description": "The embedding vector, which is a list of floats or a base64-encoded string. The length of vector depends on the model.", | |
"examples": [ | |
[ | |
0, | |
1, | |
2 | |
], | |
[] | |
] | |
}, | |
"index": { | |
"type": "integer", | |
"title": "Index", | |
"description": "The index of the embedding in the list of embeddings.", | |
"default": 0, | |
"examples": [ | |
0, | |
1 | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding" | |
], | |
"title": "EmbeddingResponseData" | |
}, | |
"type": "array", | |
"title": "Data", | |
"description": "List of `EmbeddingResponseData`.", | |
"examples": [ | |
[ | |
{ | |
"embedding": [ | |
0, | |
1, | |
2 | |
], | |
"index": 0, | |
"object": "embedding" | |
} | |
] | |
] | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "The ID of the model used.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"usage": { | |
"allOf": [ | |
{ | |
"properties": { | |
"prompt_tokens": { | |
"type": "integer", | |
"title": "Prompt Tokens", | |
"description": "Number of tokens in the prompt.", | |
"default": 0 | |
}, | |
"completion_tokens": { | |
"type": "integer", | |
"title": "Completion Tokens", | |
"description": "Number of tokens in the generated completion.", | |
"default": 0 | |
}, | |
"total_tokens": { | |
"type": "integer", | |
"title": "Total Tokens", | |
"description": "Total number of tokens used in the request (prompt + completion).", | |
"default": 0 | |
} | |
}, | |
"type": "object", | |
"title": "CompletionUsage" | |
} | |
], | |
"description": "The number of tokens consumed.", | |
"default": { | |
"prompt_tokens": 0, | |
"completion_tokens": 0, | |
"total_tokens": 0 | |
}, | |
"examples": [ | |
{ | |
"completion_tokens": 0, | |
"prompt_tokens": 0, | |
"total_tokens": 0 | |
} | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"data", | |
"model" | |
], | |
"title": "EmbeddingResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/action": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Create Action Table", | |
"operationId": "create_action_table", | |
"parameters": [ | |
{ | |
"name": "openai-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "OpenAI API key.", | |
"default": "", | |
"title": "Openai-Api-Key" | |
}, | |
"description": "OpenAI API key." | |
}, | |
{ | |
"name": "anthropic-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Anthropic API key.", | |
"default": "", | |
"title": "Anthropic-Api-Key" | |
}, | |
"description": "Anthropic API key." | |
}, | |
{ | |
"name": "gemini-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Google Gemini API key.", | |
"default": "", | |
"title": "Gemini-Api-Key" | |
}, | |
"description": "Google Gemini API key." | |
}, | |
{ | |
"name": "cohere-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Cohere API key.", | |
"default": "", | |
"title": "Cohere-Api-Key" | |
}, | |
"description": "Cohere API key." | |
}, | |
{ | |
"name": "groq-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Groq API key.", | |
"default": "", | |
"title": "Groq-Api-Key" | |
}, | |
"description": "Groq API key." | |
}, | |
{ | |
"name": "together-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Together AI API key.", | |
"default": "", | |
"title": "Together-Api-Key" | |
}, | |
"description": "Together AI API key." | |
}, | |
{ | |
"name": "jina-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Jina API key.", | |
"default": "", | |
"title": "Jina-Api-Key" | |
}, | |
"description": "Jina API key." | |
}, | |
{ | |
"name": "voyage-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Voyage API key.", | |
"default": "", | |
"title": "Voyage-Api-Key" | |
}, | |
"description": "Voyage API key." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"float", | |
"bool", | |
"str" | |
], | |
"title": "DtypeCreateEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"float\", \"bool\", \"str\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchemaCreate" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols" | |
], | |
"title": "ActionTableSchemaCreate" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"int8", | |
"float", | |
"float32", | |
"float16", | |
"bool", | |
"str", | |
"date-time" | |
], | |
"title": "DtypeEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"int8\", \"float\", \"float32\", \"float16\", \"bool\", \"str\", \"date-time\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchema" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
}, | |
"parent_id": { | |
"anyOf": [ | |
{ | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with an alphabet or number. Characters in the middle can include `_` (underscore), `-` (dash), `.` (dot)." | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Parent Id", | |
"description": "The parent table ID. If None (default), it means this is a template table." | |
}, | |
"title": { | |
"type": "string", | |
"title": "Title", | |
"description": "Chat title. Defaults to ''." | |
}, | |
"updated_at": { | |
"type": "string", | |
"title": "Updated At", | |
"description": "Table last update timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_fts": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Fts", | |
"description": "Table last FTS index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_vec": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Vec", | |
"description": "Table last vector index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_sca": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Sca", | |
"description": "Table last scalar index timestamp (ISO 8601 UTC)." | |
}, | |
"num_rows": { | |
"type": "integer", | |
"title": "Num Rows", | |
"description": "Number of rows in the table." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols", | |
"parent_id", | |
"title", | |
"updated_at", | |
"indexed_at_fts", | |
"indexed_at_vec", | |
"indexed_at_sca", | |
"num_rows" | |
], | |
"title": "TableMetaResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/knowledge": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Create Knowledge Table", | |
"operationId": "create_knowledge_table", | |
"parameters": [ | |
{ | |
"name": "openai-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "OpenAI API key.", | |
"default": "", | |
"title": "Openai-Api-Key" | |
}, | |
"description": "OpenAI API key." | |
}, | |
{ | |
"name": "anthropic-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Anthropic API key.", | |
"default": "", | |
"title": "Anthropic-Api-Key" | |
}, | |
"description": "Anthropic API key." | |
}, | |
{ | |
"name": "gemini-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Google Gemini API key.", | |
"default": "", | |
"title": "Gemini-Api-Key" | |
}, | |
"description": "Google Gemini API key." | |
}, | |
{ | |
"name": "cohere-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Cohere API key.", | |
"default": "", | |
"title": "Cohere-Api-Key" | |
}, | |
"description": "Cohere API key." | |
}, | |
{ | |
"name": "groq-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Groq API key.", | |
"default": "", | |
"title": "Groq-Api-Key" | |
}, | |
"description": "Groq API key." | |
}, | |
{ | |
"name": "together-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Together AI API key.", | |
"default": "", | |
"title": "Together-Api-Key" | |
}, | |
"description": "Together AI API key." | |
}, | |
{ | |
"name": "jina-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Jina API key.", | |
"default": "", | |
"title": "Jina-Api-Key" | |
}, | |
"description": "Jina API key." | |
}, | |
{ | |
"name": "voyage-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Voyage API key.", | |
"default": "", | |
"title": "Voyage-Api-Key" | |
}, | |
"description": "Voyage API key." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"float", | |
"bool", | |
"str" | |
], | |
"title": "DtypeCreateEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"float\", \"bool\", \"str\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
                                  "description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns to its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchemaCreate" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols", | |
"embedding_model" | |
], | |
"title": "KnowledgeTableSchemaCreate" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"int8", | |
"float", | |
"float32", | |
"float16", | |
"bool", | |
"str", | |
"date-time" | |
], | |
"title": "DtypeEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"int8\", \"float\", \"float32\", \"float16\", \"bool\", \"str\", \"date-time\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
                                                          "description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using an LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
                                      "description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns to its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchema" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
}, | |
"parent_id": { | |
"anyOf": [ | |
{ | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with an alphabet or number. Characters in the middle can include `_` (underscore), `-` (dash), `.` (dot)." | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Parent Id", | |
"description": "The parent table ID. If None (default), it means this is a template table." | |
}, | |
"title": { | |
"type": "string", | |
"title": "Title", | |
"description": "Chat title. Defaults to ''." | |
}, | |
"updated_at": { | |
"type": "string", | |
"title": "Updated At", | |
"description": "Table last update timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_fts": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Fts", | |
"description": "Table last FTS index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_vec": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Vec", | |
"description": "Table last vector index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_sca": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Sca", | |
"description": "Table last scalar index timestamp (ISO 8601 UTC)." | |
}, | |
"num_rows": { | |
"type": "integer", | |
"title": "Num Rows", | |
"description": "Number of rows in the table." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols", | |
"parent_id", | |
"title", | |
"updated_at", | |
"indexed_at_fts", | |
"indexed_at_vec", | |
"indexed_at_sca", | |
"num_rows" | |
], | |
"title": "TableMetaResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/chat": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Create Chat Table", | |
"operationId": "create_chat_table", | |
"parameters": [ | |
{ | |
"name": "openai-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "OpenAI API key.", | |
"default": "", | |
"title": "Openai-Api-Key" | |
}, | |
"description": "OpenAI API key." | |
}, | |
{ | |
"name": "anthropic-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Anthropic API key.", | |
"default": "", | |
"title": "Anthropic-Api-Key" | |
}, | |
"description": "Anthropic API key." | |
}, | |
{ | |
"name": "gemini-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Google Gemini API key.", | |
"default": "", | |
"title": "Gemini-Api-Key" | |
}, | |
"description": "Google Gemini API key." | |
}, | |
{ | |
"name": "cohere-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Cohere API key.", | |
"default": "", | |
"title": "Cohere-Api-Key" | |
}, | |
"description": "Cohere API key." | |
}, | |
{ | |
"name": "groq-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Groq API key.", | |
"default": "", | |
"title": "Groq-Api-Key" | |
}, | |
"description": "Groq API key." | |
}, | |
{ | |
"name": "together-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Together AI API key.", | |
"default": "", | |
"title": "Together-Api-Key" | |
}, | |
"description": "Together AI API key." | |
}, | |
{ | |
"name": "jina-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Jina API key.", | |
"default": "", | |
"title": "Jina-Api-Key" | |
}, | |
"description": "Jina API key." | |
}, | |
{ | |
"name": "voyage-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Voyage API key.", | |
"default": "", | |
"title": "Voyage-Api-Key" | |
}, | |
"description": "Voyage API key." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"float", | |
"bool", | |
"str" | |
], | |
"title": "DtypeCreateEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"float\", \"bool\", \"str\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
                                                      "description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using an LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
                                  "description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns to its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchemaCreate" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols" | |
], | |
"title": "ChatTableSchemaCreate" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"int8", | |
"float", | |
"float32", | |
"float16", | |
"bool", | |
"str", | |
"date-time" | |
], | |
"title": "DtypeEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"int8\", \"float\", \"float32\", \"float16\", \"bool\", \"str\", \"date-time\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
                                                          "description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using an LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchema" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
}, | |
"parent_id": { | |
"anyOf": [ | |
{ | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with a letter or a number. Characters in the middle can include `_` (underscore), `-` (dash), `.` (dot)." | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Parent Id", | |
"description": "The parent table ID. If None (default), it means this is a template table." | |
}, | |
"title": { | |
"type": "string", | |
"title": "Title", | |
"description": "Chat title. Defaults to ''." | |
}, | |
"updated_at": { | |
"type": "string", | |
"title": "Updated At", | |
"description": "Table last update timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_fts": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Fts", | |
"description": "Table last FTS index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_vec": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Vec", | |
"description": "Table last vector index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_sca": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Sca", | |
"description": "Table last scalar index timestamp (ISO 8601 UTC)." | |
}, | |
"num_rows": { | |
"type": "integer", | |
"title": "Num Rows", | |
"description": "Number of rows in the table." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols", | |
"parent_id", | |
"title", | |
"updated_at", | |
"indexed_at_fts", | |
"indexed_at_vec", | |
"indexed_at_sca", | |
"num_rows" | |
], | |
"title": "TableMetaResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/child/{table_id_src}": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Create Child Table", | |
"operationId": "create_child_table", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
}, | |
{ | |
"name": "table_id_src", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Source table name or ID.", | |
"title": "Table Id Src" | |
}, | |
"description": "Source table name or ID." | |
}, | |
{ | |
"name": "table_id_dst", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"anyOf": [ | |
{ | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Destination table name or ID.", | |
"title": "Table Id Dst" | |
}, | |
"description": "Destination table name or ID." | |
} | |
], | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"int8", | |
"float", | |
"float32", | |
"float16", | |
"bool", | |
"str", | |
"date-time" | |
], | |
"title": "DtypeEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"int8\", \"float\", \"float32\", \"float16\", \"bool\", \"str\", \"date-time\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID.", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concatenate title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchema" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
}, | |
"parent_id": { | |
"anyOf": [ | |
{ | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with a letter or a number. Characters in the middle can include `_` (underscore), `-` (dash), `.` (dot)." | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Parent Id", | |
"description": "The parent table ID. If None (default), it means this is a template table." | |
}, | |
"title": { | |
"type": "string", | |
"title": "Title", | |
"description": "Chat title. Defaults to ''." | |
}, | |
"updated_at": { | |
"type": "string", | |
"title": "Updated At", | |
"description": "Table last update timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_fts": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Fts", | |
"description": "Table last FTS index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_vec": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Vec", | |
"description": "Table last vector index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_sca": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Sca", | |
"description": "Table last scalar index timestamp (ISO 8601 UTC)." | |
}, | |
"num_rows": { | |
"type": "integer", | |
"title": "Num Rows", | |
"description": "Number of rows in the table." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols", | |
"parent_id", | |
"title", | |
"updated_at", | |
"indexed_at_fts", | |
"indexed_at_vec", | |
"indexed_at_sca", | |
"num_rows" | |
], | |
"title": "TableMetaResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/duplicate/{table_id_src}/{table_id_dst}": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Duplicate Table", | |
"operationId": "duplicate_table", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
}, | |
{ | |
"name": "table_id_src", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Source table name or ID.", | |
"title": "Table Id Src" | |
}, | |
"description": "Source table name or ID." | |
}, | |
{ | |
"name": "table_id_dst", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Destination table name or ID.", | |
"title": "Table Id Dst" | |
}, | |
"description": "Destination table name or ID." | |
}, | |
{ | |
"name": "include_data", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "boolean", | |
"description": "_Optional_. Whether to include the data from the source table in the duplicated table. Defaults to `True`.", | |
"default": true, | |
"title": "Include Data" | |
}, | |
"description": "_Optional_. Whether to include the data from the source table in the duplicated table. Defaults to `True`." | |
}, | |
{ | |
"name": "deploy", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "boolean", | |
"description": "_Optional_. Whether to deploy the duplicated table. Defaults to `False`.", | |
"default": false, | |
"title": "Deploy" | |
}, | |
"description": "_Optional_. Whether to deploy the duplicated table. Defaults to `False`." | |
} | |
], | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"int8", | |
"float", | |
"float32", | |
"float16", | |
"bool", | |
"str", | |
"date-time" | |
], | |
"title": "DtypeEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"int8\", \"float\", \"float32\", \"float16\", \"bool\", \"str\", \"date-time\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID.", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concatenate title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns to its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchema" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
}, | |
"parent_id": { | |
"anyOf": [ | |
{ | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with a letter or number. Characters in the middle can include `_` (underscore), `-` (dash), `.` (dot)." | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Parent Id", | |
"description": "The parent table ID. If None (default), it means this is a template table." | |
}, | |
"title": { | |
"type": "string", | |
"title": "Title", | |
"description": "Chat title. Defaults to ''." | |
}, | |
"updated_at": { | |
"type": "string", | |
"title": "Updated At", | |
"description": "Table last update timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_fts": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Fts", | |
"description": "Table last FTS index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_vec": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Vec", | |
"description": "Table last vector index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_sca": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Sca", | |
"description": "Table last scalar index timestamp (ISO 8601 UTC)." | |
}, | |
"num_rows": { | |
"type": "integer", | |
"title": "Num Rows", | |
"description": "Number of rows in the table." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols", | |
"parent_id", | |
"title", | |
"updated_at", | |
"indexed_at_fts", | |
"indexed_at_vec", | |
"indexed_at_sca", | |
"num_rows" | |
], | |
"title": "TableMetaResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/rename/{table_id_src}/{table_id_dst}": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Rename Table", | |
"operationId": "rename_table", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
}, | |
{ | |
"name": "table_id_src", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"type": "string", | |
"description": "Source table name or ID.", | |
"title": "Table Id Src" | |
}, | |
"description": "Source table name or ID." | |
}, | |
{ | |
"name": "table_id_dst", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Destination table name or ID.", | |
"title": "Table Id Dst" | |
}, | |
"description": "Destination table name or ID." | |
} | |
], | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"int8", | |
"float", | |
"float32", | |
"float16", | |
"bool", | |
"str", | |
"date-time" | |
], | |
"title": "DtypeEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"int8\", \"float\", \"float32\", \"float16\", \"bool\", \"str\", \"date-time\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using an LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns to its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchema" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
}, | |
"parent_id": { | |
"anyOf": [ | |
{ | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with a letter or number. Characters in the middle can include `_` (underscore), `-` (dash), `.` (dot)." | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Parent Id", | |
"description": "The parent table ID. If None (default), it means this is a template table." | |
}, | |
"title": { | |
"type": "string", | |
"title": "Title", | |
"description": "Chat title. Defaults to ''." | |
}, | |
"updated_at": { | |
"type": "string", | |
"title": "Updated At", | |
"description": "Table last update timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_fts": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Fts", | |
"description": "Table last FTS index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_vec": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Vec", | |
"description": "Table last vector index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_sca": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Sca", | |
"description": "Table last scalar index timestamp (ISO 8601 UTC)." | |
}, | |
"num_rows": { | |
"type": "integer", | |
"title": "Num Rows", | |
"description": "Number of rows in the table." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols", | |
"parent_id", | |
"title", | |
"updated_at", | |
"indexed_at_fts", | |
"indexed_at_vec", | |
"indexed_at_sca", | |
"num_rows" | |
], | |
"title": "TableMetaResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/{table_id}": { | |
"delete": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Delete Table", | |
"operationId": "delete_table", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
}, | |
{ | |
"name": "table_id", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"type": "string", | |
"description": "The ID of the table to delete.", | |
"title": "Table Id" | |
}, | |
"description": "The ID of the table to delete." | |
} | |
], | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"ok": { | |
"type": "boolean", | |
"title": "Ok", | |
"default": true | |
} | |
}, | |
"type": "object", | |
"title": "OkResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
}, | |
"get": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Get Table", | |
"operationId": "get_table", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
}, | |
{ | |
"name": "table_id", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "The ID of the table to fetch.", | |
"title": "Table Id" | |
}, | |
"description": "The ID of the table to fetch." | |
} | |
], | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"int8", | |
"float", | |
"float32", | |
"float16", | |
"bool", | |
"str", | |
"date-time" | |
], | |
"title": "DtypeEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"int8\", \"float\", \"float32\", \"float16\", \"bool\", \"str\", \"date-time\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using an LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns to its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchema" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
}, | |
"parent_id": { | |
"anyOf": [ | |
{ | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with a letter or number. Characters in the middle can include `_` (underscore), `-` (dash), `.` (dot)." | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Parent Id", | |
"description": "The parent table ID. If None (default), it means this is a template table." | |
}, | |
"title": { | |
"type": "string", | |
"title": "Title", | |
"description": "Chat title. Defaults to ''." | |
}, | |
"updated_at": { | |
"type": "string", | |
"title": "Updated At", | |
"description": "Table last update timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_fts": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Fts", | |
"description": "Table last FTS index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_vec": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Vec", | |
"description": "Table last vector index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_sca": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Sca", | |
"description": "Table last scalar index timestamp (ISO 8601 UTC)." | |
}, | |
"num_rows": { | |
"type": "integer", | |
"title": "Num Rows", | |
"description": "Number of rows in the table." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols", | |
"parent_id", | |
"title", | |
"updated_at", | |
"indexed_at_fts", | |
"indexed_at_vec", | |
"indexed_at_sca", | |
"num_rows" | |
], | |
"title": "TableMetaResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}": { | |
"get": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "List Tables", | |
"operationId": "list_tables", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
}, | |
{ | |
"name": "offset", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "integer", | |
"minimum": 0, | |
"description": "_Optional_. Item offset for pagination. Defaults to 0.", | |
"default": 0, | |
"title": "Offset" | |
}, | |
"description": "_Optional_. Item offset for pagination. Defaults to 0." | |
}, | |
{ | |
"name": "limit", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "integer", | |
"maximum": 100, | |
"exclusiveMinimum": 0, | |
"description": "_Optional_. Number of tables to return (min 1, max 100). Defaults to 100.", | |
"default": 100, | |
"title": "Limit" | |
}, | |
"description": "_Optional_. Number of tables to return (min 1, max 100). Defaults to 100." | |
}, | |
{ | |
"name": "parent_id", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "_Optional_. Parent ID of tables to return. Defaults to None (return all tables). Additionally for Chat Table, you can list: (1) all chat agents by passing in \"_agent_\"; or (2) all chats by passing in \"_chat_\".", | |
"title": "Parent Id" | |
}, | |
"description": "_Optional_. Parent ID of tables to return. Defaults to None (return all tables). Additionally for Chat Table, you can list: (1) all chat agents by passing in \"_agent_\"; or (2) all chats by passing in \"_chat_\"." | |
}, | |
{ | |
"name": "search_query", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"maxLength": 100, | |
"description": "_Optional_. A string to search for within table IDs as a filter. Defaults to \"\" (no filter).", | |
"default": "", | |
"title": "Search Query" | |
}, | |
"description": "_Optional_. A string to search for within table IDs as a filter. Defaults to \"\" (no filter)." | |
}, | |
{ | |
"name": "order_by", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"title": "Order By", | |
"enum": [ | |
"id", | |
"updated_at" | |
], | |
"type": "string", | |
"minLength": 1, | |
"description": "_Optional_. Sort tables by this attribute. Defaults to \"updated_at\".", | |
"default": "updated_at" | |
}, | |
"description": "_Optional_. Sort tables by this attribute. Defaults to \"updated_at\"." | |
}, | |
{ | |
"name": "order_descending", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "boolean", | |
"description": "_Optional_. Whether to sort by descending order. Defaults to True.", | |
"default": true, | |
"title": "Order Descending" | |
}, | |
"description": "_Optional_. Whether to sort by descending order. Defaults to True." | |
}, | |
{ | |
"name": "count_rows", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "boolean", | |
"description": "_Optional_. Whether to count the rows of the tables. Defaults to False.", | |
"default": false, | |
"title": "Count Rows" | |
}, | |
"description": "_Optional_. Whether to count the rows of the tables. Defaults to False." | |
} | |
], | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"items": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"int8", | |
"float", | |
"float32", | |
"float16", | |
"bool", | |
"str", | |
"date-time" | |
], | |
"title": "DtypeEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"int8\", \"float\", \"float32\", \"float16\", \"bool\", \"str\", \"date-time\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchema" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
}, | |
"parent_id": { | |
"anyOf": [ | |
{ | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with an alphabet or number. Characters in the middle can include `_` (underscore), `-` (dash), `.` (dot)." | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Parent Id", | |
"description": "The parent table ID. If None (default), it means this is a template table." | |
}, | |
"title": { | |
"type": "string", | |
"title": "Title", | |
"description": "Chat title. Defaults to ''." | |
}, | |
"updated_at": { | |
"type": "string", | |
"title": "Updated At", | |
"description": "Table last update timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_fts": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Fts", | |
"description": "Table last FTS index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_vec": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Vec", | |
"description": "Table last vector index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_sca": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Sca", | |
"description": "Table last scalar index timestamp (ISO 8601 UTC)." | |
}, | |
"num_rows": { | |
"type": "integer", | |
"title": "Num Rows", | |
"description": "Number of rows in the table." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols", | |
"parent_id", | |
"title", | |
"updated_at", | |
"indexed_at_fts", | |
"indexed_at_vec", | |
"indexed_at_sca", | |
"num_rows" | |
], | |
"title": "TableMetaResponse" | |
}, | |
"type": "array", | |
"title": "Items", | |
"description": "List of items paginated items.", | |
"default": [], | |
"examples": [ | |
[] | |
] | |
}, | |
"offset": { | |
"type": "integer", | |
"title": "Offset", | |
"description": "Number of skipped items.", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"limit": { | |
"type": "integer", | |
"title": "Limit", | |
"description": "Number of items per page.", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"total": { | |
"type": "integer", | |
"title": "Total", | |
"description": "Total number of items.", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
} | |
}, | |
"type": "object", | |
"title": "Page[TableMetaResponse]" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/gen_config/update": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Update Gen Config", | |
"operationId": "update_gen_config", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
}, | |
{ | |
"name": "openai-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "OpenAI API key.", | |
"default": "", | |
"title": "Openai-Api-Key" | |
}, | |
"description": "OpenAI API key." | |
}, | |
{ | |
"name": "anthropic-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Anthropic API key.", | |
"default": "", | |
"title": "Anthropic-Api-Key" | |
}, | |
"description": "Anthropic API key." | |
}, | |
{ | |
"name": "gemini-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Google Gemini API key.", | |
"default": "", | |
"title": "Gemini-Api-Key" | |
}, | |
"description": "Google Gemini API key." | |
}, | |
{ | |
"name": "cohere-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Cohere API key.", | |
"default": "", | |
"title": "Cohere-Api-Key" | |
}, | |
"description": "Cohere API key." | |
}, | |
{ | |
"name": "groq-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Groq API key.", | |
"default": "", | |
"title": "Groq-Api-Key" | |
}, | |
"description": "Groq API key." | |
}, | |
{ | |
"name": "together-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Together AI API key.", | |
"default": "", | |
"title": "Together-Api-Key" | |
}, | |
"description": "Together AI API key." | |
}, | |
{ | |
"name": "jina-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Jina API key.", | |
"default": "", | |
"title": "Jina-Api-Key" | |
}, | |
"description": "Jina API key." | |
}, | |
{ | |
"name": "voyage-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Voyage API key.", | |
"default": "", | |
"title": "Voyage-Api-Key" | |
}, | |
"description": "Voyage API key." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Table Id", | |
"description": "Table name or ID." | |
}, | |
"column_map": { | |
"patternProperties": { | |
"^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
] | |
} | |
}, | |
"type": "object", | |
"title": "Column Map", | |
"description": "Mapping of column ID to generation config JSON in the form of `GenConfig`. Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id", | |
"column_map" | |
], | |
"title": "GenConfigUpdateRequest" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"int8", | |
"float", | |
"float32", | |
"float16", | |
"bool", | |
"str", | |
"date-time" | |
], | |
"title": "DtypeEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"int8\", \"float\", \"float32\", \"float16\", \"bool\", \"str\", \"date-time\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchema" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
}, | |
"parent_id": { | |
"anyOf": [ | |
{ | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with an alphabet or number. Characters in the middle can include `_` (underscore), `-` (dash), `.` (dot)." | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Parent Id", | |
"description": "The parent table ID. If None (default), it means this is a template table." | |
}, | |
"title": { | |
"type": "string", | |
"title": "Title", | |
"description": "Chat title. Defaults to ''." | |
}, | |
"updated_at": { | |
"type": "string", | |
"title": "Updated At", | |
"description": "Table last update timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_fts": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Fts", | |
"description": "Table last FTS index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_vec": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Vec", | |
"description": "Table last vector index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_sca": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Sca", | |
"description": "Table last scalar index timestamp (ISO 8601 UTC)." | |
}, | |
"num_rows": { | |
"type": "integer", | |
"title": "Num Rows", | |
"description": "Number of rows in the table." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols", | |
"parent_id", | |
"title", | |
"updated_at", | |
"indexed_at_fts", | |
"indexed_at_vec", | |
"indexed_at_sca", | |
"num_rows" | |
], | |
"title": "TableMetaResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/action/columns/add": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Add Action Columns", | |
"operationId": "add_action_columns", | |
"parameters": [ | |
{ | |
"name": "openai-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "OpenAI API key.", | |
"default": "", | |
"title": "Openai-Api-Key" | |
}, | |
"description": "OpenAI API key." | |
}, | |
{ | |
"name": "anthropic-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Anthropic API key.", | |
"default": "", | |
"title": "Anthropic-Api-Key" | |
}, | |
"description": "Anthropic API key." | |
}, | |
{ | |
"name": "gemini-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Google Gemini API key.", | |
"default": "", | |
"title": "Gemini-Api-Key" | |
}, | |
"description": "Google Gemini API key." | |
}, | |
{ | |
"name": "cohere-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Cohere API key.", | |
"default": "", | |
"title": "Cohere-Api-Key" | |
}, | |
"description": "Cohere API key." | |
}, | |
{ | |
"name": "groq-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Groq API key.", | |
"default": "", | |
"title": "Groq-Api-Key" | |
}, | |
"description": "Groq API key." | |
}, | |
{ | |
"name": "together-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Together AI API key.", | |
"default": "", | |
"title": "Together-Api-Key" | |
}, | |
"description": "Together AI API key." | |
}, | |
{ | |
"name": "jina-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Jina API key.", | |
"default": "", | |
"title": "Jina-Api-Key" | |
}, | |
"description": "Jina API key." | |
}, | |
{ | |
"name": "voyage-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Voyage API key.", | |
"default": "", | |
"title": "Voyage-Api-Key" | |
}, | |
"description": "Voyage API key." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"float", | |
"bool", | |
"str" | |
], | |
"title": "DtypeCreateEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"float\", \"bool\", \"str\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchemaCreate" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols" | |
], | |
"title": "AddActionColumnSchema" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"int8", | |
"float", | |
"float32", | |
"float16", | |
"bool", | |
"str", | |
"date-time" | |
], | |
"title": "DtypeEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"int8\", \"float\", \"float32\", \"float16\", \"bool\", \"str\", \"date-time\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchema" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
}, | |
"parent_id": { | |
"anyOf": [ | |
{ | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with an alphabet or number. Characters in the middle can include `_` (underscore), `-` (dash), `.` (dot)." | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Parent Id", | |
"description": "The parent table ID. If None (default), it means this is a template table." | |
}, | |
"title": { | |
"type": "string", | |
"title": "Title", | |
"description": "Chat title. Defaults to ''." | |
}, | |
"updated_at": { | |
"type": "string", | |
"title": "Updated At", | |
"description": "Table last update timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_fts": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Fts", | |
"description": "Table last FTS index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_vec": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Vec", | |
"description": "Table last vector index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_sca": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Sca", | |
"description": "Table last scalar index timestamp (ISO 8601 UTC)." | |
}, | |
"num_rows": { | |
"type": "integer", | |
"title": "Num Rows", | |
"description": "Number of rows in the table." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols", | |
"parent_id", | |
"title", | |
"updated_at", | |
"indexed_at_fts", | |
"indexed_at_vec", | |
"indexed_at_sca", | |
"num_rows" | |
], | |
"title": "TableMetaResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/knowledge/columns/add": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Add Knowledge Columns", | |
"operationId": "add_knowledge_columns", | |
"parameters": [ | |
{ | |
"name": "openai-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "OpenAI API key.", | |
"default": "", | |
"title": "Openai-Api-Key" | |
}, | |
"description": "OpenAI API key." | |
}, | |
{ | |
"name": "anthropic-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Anthropic API key.", | |
"default": "", | |
"title": "Anthropic-Api-Key" | |
}, | |
"description": "Anthropic API key." | |
}, | |
{ | |
"name": "gemini-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Google Gemini API key.", | |
"default": "", | |
"title": "Gemini-Api-Key" | |
}, | |
"description": "Google Gemini API key." | |
}, | |
{ | |
"name": "cohere-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Cohere API key.", | |
"default": "", | |
"title": "Cohere-Api-Key" | |
}, | |
"description": "Cohere API key." | |
}, | |
{ | |
"name": "groq-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Groq API key.", | |
"default": "", | |
"title": "Groq-Api-Key" | |
}, | |
"description": "Groq API key." | |
}, | |
{ | |
"name": "together-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Together AI API key.", | |
"default": "", | |
"title": "Together-Api-Key" | |
}, | |
"description": "Together AI API key." | |
}, | |
{ | |
"name": "jina-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Jina API key.", | |
"default": "", | |
"title": "Jina-Api-Key" | |
}, | |
"description": "Jina API key." | |
}, | |
{ | |
"name": "voyage-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Voyage API key.", | |
"default": "", | |
"title": "Voyage-Api-Key" | |
}, | |
"description": "Voyage API key." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"float", | |
"bool", | |
"str" | |
], | |
"title": "DtypeCreateEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"float\", \"bool\", \"str\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using an LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concatenate title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchemaCreate" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols" | |
], | |
"title": "AddKnowledgeColumnSchema" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"int8", | |
"float", | |
"float32", | |
"float16", | |
"bool", | |
"str", | |
"date-time" | |
], | |
"title": "DtypeEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"int8\", \"float\", \"float32\", \"float16\", \"bool\", \"str\", \"date-time\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using an LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concatenate title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchema" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
}, | |
"parent_id": { | |
"anyOf": [ | |
{ | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with a letter or number. Characters in the middle can include `_` (underscore), `-` (dash), `.` (dot)." | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Parent Id", | |
"description": "The parent table ID. If None (default), it means this is a template table." | |
}, | |
"title": { | |
"type": "string", | |
"title": "Title", | |
"description": "Chat title. Defaults to ''." | |
}, | |
"updated_at": { | |
"type": "string", | |
"title": "Updated At", | |
"description": "Table last update timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_fts": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Fts", | |
"description": "Table last FTS index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_vec": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Vec", | |
"description": "Table last vector index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_sca": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Sca", | |
"description": "Table last scalar index timestamp (ISO 8601 UTC)." | |
}, | |
"num_rows": { | |
"type": "integer", | |
"title": "Num Rows", | |
"description": "Number of rows in the table." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols", | |
"parent_id", | |
"title", | |
"updated_at", | |
"indexed_at_fts", | |
"indexed_at_vec", | |
"indexed_at_sca", | |
"num_rows" | |
], | |
"title": "TableMetaResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/chat/columns/add": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Add Chat Columns", | |
"operationId": "add_chat_columns", | |
"parameters": [ | |
{ | |
"name": "openai-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "OpenAI API key.", | |
"default": "", | |
"title": "Openai-Api-Key" | |
}, | |
"description": "OpenAI API key." | |
}, | |
{ | |
"name": "anthropic-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Anthropic API key.", | |
"default": "", | |
"title": "Anthropic-Api-Key" | |
}, | |
"description": "Anthropic API key." | |
}, | |
{ | |
"name": "gemini-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Google Gemini API key.", | |
"default": "", | |
"title": "Gemini-Api-Key" | |
}, | |
"description": "Google Gemini API key." | |
}, | |
{ | |
"name": "cohere-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Cohere API key.", | |
"default": "", | |
"title": "Cohere-Api-Key" | |
}, | |
"description": "Cohere API key." | |
}, | |
{ | |
"name": "groq-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Groq API key.", | |
"default": "", | |
"title": "Groq-Api-Key" | |
}, | |
"description": "Groq API key." | |
}, | |
{ | |
"name": "together-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Together AI API key.", | |
"default": "", | |
"title": "Together-Api-Key" | |
}, | |
"description": "Together AI API key." | |
}, | |
{ | |
"name": "jina-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Jina API key.", | |
"default": "", | |
"title": "Jina-Api-Key" | |
}, | |
"description": "Jina API key." | |
}, | |
{ | |
"name": "voyage-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Voyage API key.", | |
"default": "", | |
"title": "Voyage-Api-Key" | |
}, | |
"description": "Voyage API key." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"float", | |
"bool", | |
"str" | |
], | |
"title": "DtypeCreateEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"float\", \"bool\", \"str\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchemaCreate" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols" | |
], | |
"title": "AddChatColumnSchema" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"int8", | |
"float", | |
"float32", | |
"float16", | |
"bool", | |
"str", | |
"date-time" | |
], | |
"title": "DtypeEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"int8\", \"float\", \"float32\", \"float16\", \"bool\", \"str\", \"date-time\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchema" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
}, | |
"parent_id": { | |
"anyOf": [ | |
{ | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with an alphabet or number. Characters in the middle can include `_` (underscore), `-` (dash), `.` (dot)." | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Parent Id", | |
"description": "The parent table ID. If None (default), it means this is a template table." | |
}, | |
"title": { | |
"type": "string", | |
"title": "Title", | |
"description": "Chat title. Defaults to ''." | |
}, | |
"updated_at": { | |
"type": "string", | |
"title": "Updated At", | |
"description": "Table last update timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_fts": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Fts", | |
"description": "Table last FTS index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_vec": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Vec", | |
"description": "Table last vector index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_sca": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Sca", | |
"description": "Table last scalar index timestamp (ISO 8601 UTC)." | |
}, | |
"num_rows": { | |
"type": "integer", | |
"title": "Num Rows", | |
"description": "Number of rows in the table." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols", | |
"parent_id", | |
"title", | |
"updated_at", | |
"indexed_at_fts", | |
"indexed_at_vec", | |
"indexed_at_sca", | |
"num_rows" | |
], | |
"title": "TableMetaResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/columns/drop": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Drop Columns", | |
"operationId": "drop_columns", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Table Id", | |
"description": "Table name or ID." | |
}, | |
"column_names": { | |
"items": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$", | |
"description": "Column name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with an alphabet or number. Characters in the middle can include `_` (underscore), `-` (dash), ` ` (space). Cannot be called \"ID\" or \"Updated at\" (case-insensitive)." | |
}, | |
"type": "array", | |
"title": "Column Names", | |
"description": "List of column ID to drop." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id", | |
"column_names" | |
], | |
"title": "ColumnDropRequest" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"int8", | |
"float", | |
"float32", | |
"float16", | |
"bool", | |
"str", | |
"date-time" | |
], | |
"title": "DtypeEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"int8\", \"float\", \"float32\", \"float16\", \"bool\", \"str\", \"date-time\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchema" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
}, | |
"parent_id": { | |
"anyOf": [ | |
{ | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with an alphabet or number. Characters in the middle can include `_` (underscore), `-` (dash), `.` (dot)." | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Parent Id", | |
"description": "The parent table ID. If None (default), it means this is a template table." | |
}, | |
"title": { | |
"type": "string", | |
"title": "Title", | |
"description": "Chat title. Defaults to ''." | |
}, | |
"updated_at": { | |
"type": "string", | |
"title": "Updated At", | |
"description": "Table last update timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_fts": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Fts", | |
"description": "Table last FTS index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_vec": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Vec", | |
"description": "Table last vector index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_sca": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Sca", | |
"description": "Table last scalar index timestamp (ISO 8601 UTC)." | |
}, | |
"num_rows": { | |
"type": "integer", | |
"title": "Num Rows", | |
"description": "Number of rows in the table." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols", | |
"parent_id", | |
"title", | |
"updated_at", | |
"indexed_at_fts", | |
"indexed_at_vec", | |
"indexed_at_sca", | |
"num_rows" | |
], | |
"title": "TableMetaResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/columns/rename": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Rename Columns", | |
"operationId": "rename_columns", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Table Id", | |
"description": "Table name or ID." | |
}, | |
"column_map": { | |
"patternProperties": { | |
"^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$", | |
"description": "Column name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with an alphabet or number. Characters in the middle can include `_` (underscore), `-` (dash), ` ` (space). Cannot be called \"ID\" or \"Updated at\" (case-insensitive)." | |
} | |
}, | |
"type": "object", | |
"title": "Column Map", | |
"description": "Mapping of old column names to new column names." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id", | |
"column_map" | |
], | |
"title": "ColumnRenameRequest" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"int8", | |
"float", | |
"float32", | |
"float16", | |
"bool", | |
"str", | |
"date-time" | |
], | |
"title": "DtypeEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"int8\", \"float\", \"float32\", \"float16\", \"bool\", \"str\", \"date-time\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchema" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
}, | |
"parent_id": { | |
"anyOf": [ | |
{ | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with an alphabet or number. Characters in the middle can include `_` (underscore), `-` (dash), `.` (dot)." | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Parent Id", | |
"description": "The parent table ID. If None (default), it means this is a template table." | |
}, | |
"title": { | |
"type": "string", | |
"title": "Title", | |
"description": "Chat title. Defaults to ''." | |
}, | |
"updated_at": { | |
"type": "string", | |
"title": "Updated At", | |
"description": "Table last update timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_fts": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Fts", | |
"description": "Table last FTS index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_vec": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Vec", | |
"description": "Table last vector index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_sca": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Sca", | |
"description": "Table last scalar index timestamp (ISO 8601 UTC)." | |
}, | |
"num_rows": { | |
"type": "integer", | |
"title": "Num Rows", | |
"description": "Number of rows in the table." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols", | |
"parent_id", | |
"title", | |
"updated_at", | |
"indexed_at_fts", | |
"indexed_at_vec", | |
"indexed_at_sca", | |
"num_rows" | |
], | |
"title": "TableMetaResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/columns/reorder": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Reorder Columns", | |
"operationId": "reorder_columns", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Table Id", | |
"description": "Table name or ID." | |
}, | |
"column_names": { | |
"items": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$", | |
"description": "Column name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with an alphabet or number. Characters in the middle can include `_` (underscore), `-` (dash), ` ` (space). Cannot be called \"ID\" or \"Updated at\" (case-insensitive)." | |
}, | |
"type": "array", | |
"title": "Column Names", | |
"description": "List of column ID in the desired order." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id", | |
"column_names" | |
], | |
"title": "ColumnReorderRequest" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"int8", | |
"float", | |
"float32", | |
"float16", | |
"bool", | |
"str", | |
"date-time" | |
], | |
"title": "DtypeEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"int8\", \"float\", \"float32\", \"float16\", \"bool\", \"str\", \"date-time\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchema" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
}, | |
"parent_id": { | |
"anyOf": [ | |
{ | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with an alphabet or number. Characters in the middle can include `_` (underscore), `-` (dash), `.` (dot)." | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Parent Id", | |
"description": "The parent table ID. If None (default), it means this is a template table." | |
}, | |
"title": { | |
"type": "string", | |
"title": "Title", | |
"description": "Chat title. Defaults to ''." | |
}, | |
"updated_at": { | |
"type": "string", | |
"title": "Updated At", | |
"description": "Table last update timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_fts": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Fts", | |
"description": "Table last FTS index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_vec": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Vec", | |
"description": "Table last vector index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_sca": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Sca", | |
"description": "Table last scalar index timestamp (ISO 8601 UTC)." | |
}, | |
"num_rows": { | |
"type": "integer", | |
"title": "Num Rows", | |
"description": "Number of rows in the table." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols", | |
"parent_id", | |
"title", | |
"updated_at", | |
"indexed_at_fts", | |
"indexed_at_vec", | |
"indexed_at_sca", | |
"num_rows" | |
], | |
"title": "TableMetaResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/{table_id}/rows": { | |
"get": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "List Rows", | |
"operationId": "list_rows", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
}, | |
{ | |
"name": "table_id", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table ID or name.", | |
"title": "Table Id" | |
}, | |
"description": "Table ID or name." | |
}, | |
{ | |
"name": "offset", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "integer", | |
"minimum": 0, | |
"description": "_Optional_. Item offset for pagination. Defaults to 0.", | |
"default": 0, | |
"title": "Offset" | |
}, | |
"description": "_Optional_. Item offset for pagination. Defaults to 0." | |
}, | |
{ | |
"name": "limit", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "integer", | |
"maximum": 100, | |
"exclusiveMinimum": 0, | |
"description": "_Optional_. Number of rows to return (min 1, max 100). Defaults to 100.", | |
"default": 100, | |
"title": "Limit" | |
}, | |
"description": "_Optional_. Number of rows to return (min 1, max 100). Defaults to 100." | |
}, | |
{ | |
"name": "search_query", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"maxLength": 10000, | |
"description": "_Optional_. A string to search for within the rows as a filter. Defaults to \"\" (no filter).", | |
"default": "", | |
"title": "Search Query" | |
}, | |
"description": "_Optional_. A string to search for within the rows as a filter. Defaults to \"\" (no filter)." | |
}, | |
{ | |
"name": "columns", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"anyOf": [ | |
{ | |
"type": "array", | |
"items": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$", | |
"description": "Column name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with an alphabet or number. Characters in the middle can include `_` (underscore), `-` (dash), ` ` (space). Cannot be called \"ID\" or \"Updated at\" (case-insensitive)." | |
} | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "_Optional_. A list of column names to include in the response. Default is to return all columns.", | |
"title": "Columns" | |
}, | |
"description": "_Optional_. A list of column names to include in the response. Default is to return all columns." | |
}, | |
{ | |
"name": "float_decimals", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "integer", | |
"minimum": 0, | |
"description": "_Optional_. Number of decimals for float values. Defaults to 0 (no rounding).", | |
"default": 0, | |
"title": "Float Decimals" | |
}, | |
"description": "_Optional_. Number of decimals for float values. Defaults to 0 (no rounding)." | |
}, | |
{ | |
"name": "vec_decimals", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "integer", | |
                      "description": "_Optional_. Number of decimals for vectors. If it is negative, exclude vector columns. Defaults to 0 (no rounding).", | |
"default": 0, | |
"title": "Vec Decimals" | |
}, | |
            "description": "_Optional_. Number of decimals for vectors. If it is negative, exclude vector columns. Defaults to 0 (no rounding)." | |
}, | |
{ | |
"name": "order_descending", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "boolean", | |
"description": "_Optional_. Whether to sort by descending order. Defaults to True.", | |
"default": true, | |
"title": "Order Descending" | |
}, | |
"description": "_Optional_. Whether to sort by descending order. Defaults to True." | |
} | |
], | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"items": { | |
"items": { | |
"patternProperties": { | |
"^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$": {} | |
}, | |
"type": "object" | |
}, | |
"type": "array", | |
"title": "Items", | |
                      "description": "List of paginated items.", | |
"default": [], | |
"examples": [ | |
[] | |
] | |
}, | |
"offset": { | |
"type": "integer", | |
"title": "Offset", | |
"description": "Number of skipped items.", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"limit": { | |
"type": "integer", | |
"title": "Limit", | |
"description": "Number of items per page.", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"total": { | |
"type": "integer", | |
"title": "Total", | |
"description": "Total number of items.", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
} | |
}, | |
"type": "object", | |
"title": "Page[dict[Annotated[str, FieldInfo(annotation=NoneType, required=True, description='Column name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with an alphabet or number. Characters in the middle can include `_` (underscore), `-` (dash), ` ` (space). Cannot be called \"ID\" or \"Updated at\" (case-insensitive).', metadata=[_PydanticGeneralMetadata(pattern='^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$')])], Any]]" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/{table_id}/rows/{row_id}": { | |
"get": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Get Row", | |
"operationId": "get_row", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
}, | |
{ | |
"name": "table_id", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table ID or name.", | |
"title": "Table Id" | |
}, | |
"description": "Table ID or name." | |
}, | |
{ | |
"name": "row_id", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"type": "string", | |
"description": "The ID of the specific row to fetch.", | |
"title": "Row Id" | |
}, | |
"description": "The ID of the specific row to fetch." | |
}, | |
{ | |
"name": "columns", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"anyOf": [ | |
{ | |
"type": "array", | |
"items": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$", | |
"description": "Column name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with an alphabet or number. Characters in the middle can include `_` (underscore), `-` (dash), ` ` (space). Cannot be called \"ID\" or \"Updated at\" (case-insensitive)." | |
} | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "_Optional_. A list of column names to include in the response. Default is to return all columns.", | |
"title": "Columns" | |
}, | |
"description": "_Optional_. A list of column names to include in the response. Default is to return all columns." | |
}, | |
{ | |
"name": "float_decimals", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "integer", | |
"minimum": 0, | |
"description": "_Optional_. Number of decimals for float values. Defaults to 0 (no rounding).", | |
"default": 0, | |
"title": "Float Decimals" | |
}, | |
"description": "_Optional_. Number of decimals for float values. Defaults to 0 (no rounding)." | |
}, | |
{ | |
"name": "vec_decimals", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "integer", | |
                      "description": "_Optional_. Number of decimals for vectors. If it is negative, exclude vector columns. Defaults to 0 (no rounding).", | |
"default": 0, | |
"title": "Vec Decimals" | |
}, | |
            "description": "_Optional_. Number of decimals for vectors. If it is negative, exclude vector columns. Defaults to 0 (no rounding)." | |
} | |
], | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"type": "object", | |
"patternProperties": { | |
"^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$": {} | |
}, | |
"title": "Response Get Row" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
}, | |
"delete": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Delete Row", | |
"operationId": "delete_row", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
}, | |
{ | |
"name": "table_id", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table ID or name.", | |
"title": "Table Id" | |
}, | |
"description": "Table ID or name." | |
}, | |
{ | |
"name": "row_id", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"type": "string", | |
"description": "The ID of the specific row to delete.", | |
"title": "Row Id" | |
}, | |
"description": "The ID of the specific row to delete." | |
}, | |
{ | |
"name": "reindex", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "boolean", | |
"description": "Whether to reindex immediately.", | |
"default": true, | |
"title": "Reindex" | |
}, | |
"description": "Whether to reindex immediately." | |
} | |
], | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"ok": { | |
"type": "boolean", | |
"title": "Ok", | |
"default": true | |
} | |
}, | |
"type": "object", | |
"title": "OkResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/rows/add": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Add Rows", | |
"operationId": "add_rows", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
}, | |
{ | |
"name": "openai-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "OpenAI API key.", | |
"default": "", | |
"title": "Openai-Api-Key" | |
}, | |
"description": "OpenAI API key." | |
}, | |
{ | |
"name": "anthropic-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Anthropic API key.", | |
"default": "", | |
"title": "Anthropic-Api-Key" | |
}, | |
"description": "Anthropic API key." | |
}, | |
{ | |
"name": "gemini-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Google Gemini API key.", | |
"default": "", | |
"title": "Gemini-Api-Key" | |
}, | |
"description": "Google Gemini API key." | |
}, | |
{ | |
"name": "cohere-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Cohere API key.", | |
"default": "", | |
"title": "Cohere-Api-Key" | |
}, | |
"description": "Cohere API key." | |
}, | |
{ | |
"name": "groq-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Groq API key.", | |
"default": "", | |
"title": "Groq-Api-Key" | |
}, | |
"description": "Groq API key." | |
}, | |
{ | |
"name": "together-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Together AI API key.", | |
"default": "", | |
"title": "Together-Api-Key" | |
}, | |
"description": "Together AI API key." | |
}, | |
{ | |
"name": "jina-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Jina API key.", | |
"default": "", | |
"title": "Jina-Api-Key" | |
}, | |
"description": "Jina API key." | |
}, | |
{ | |
"name": "voyage-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Voyage API key.", | |
"default": "", | |
"title": "Voyage-Api-Key" | |
}, | |
"description": "Voyage API key." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Table Id", | |
"description": "Table name or ID." | |
}, | |
"data": { | |
"items": { | |
"patternProperties": { | |
"^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$": {} | |
}, | |
"type": "object" | |
}, | |
"type": "array", | |
"maxItems": 100, | |
"minItems": 1, | |
"title": "Data", | |
"description": "List of mapping of column names to its value. In other words, each item in the list is a row, and each item is a mapping. Minimum 1 row, maximum 100 rows." | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "Whether or not to stream the LLM generation.", | |
"default": true | |
}, | |
"reindex": { | |
"anyOf": [ | |
{ | |
"type": "boolean" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reindex", | |
"description": "_Optional_. If True, reindex immediately. If False, wait until next periodic reindex. If None (default), reindex immediately for smaller tables." | |
}, | |
"concurrent": { | |
"type": "boolean", | |
"title": "Concurrent", | |
"description": "_Optional_. Whether or not to concurrently generate the output rows and columns.", | |
"default": true | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id", | |
"data" | |
], | |
"title": "RowAddRequestWithLimit" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": {} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/rows/regen": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Regen Rows", | |
"operationId": "regen_rows", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
}, | |
{ | |
"name": "openai-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "OpenAI API key.", | |
"default": "", | |
"title": "Openai-Api-Key" | |
}, | |
"description": "OpenAI API key." | |
}, | |
{ | |
"name": "anthropic-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Anthropic API key.", | |
"default": "", | |
"title": "Anthropic-Api-Key" | |
}, | |
"description": "Anthropic API key." | |
}, | |
{ | |
"name": "gemini-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Google Gemini API key.", | |
"default": "", | |
"title": "Gemini-Api-Key" | |
}, | |
"description": "Google Gemini API key." | |
}, | |
{ | |
"name": "cohere-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Cohere API key.", | |
"default": "", | |
"title": "Cohere-Api-Key" | |
}, | |
"description": "Cohere API key." | |
}, | |
{ | |
"name": "groq-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Groq API key.", | |
"default": "", | |
"title": "Groq-Api-Key" | |
}, | |
"description": "Groq API key." | |
}, | |
{ | |
"name": "together-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Together AI API key.", | |
"default": "", | |
"title": "Together-Api-Key" | |
}, | |
"description": "Together AI API key." | |
}, | |
{ | |
"name": "jina-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Jina API key.", | |
"default": "", | |
"title": "Jina-Api-Key" | |
}, | |
"description": "Jina API key." | |
}, | |
{ | |
"name": "voyage-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Voyage API key.", | |
"default": "", | |
"title": "Voyage-Api-Key" | |
}, | |
"description": "Voyage API key." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Table Id", | |
"description": "Table name or ID." | |
}, | |
"row_ids": { | |
"items": { | |
"type": "string" | |
}, | |
"type": "array", | |
"maxItems": 100, | |
"minItems": 1, | |
"title": "Row Ids", | |
"description": "List of ID of the row to regenerate. Minimum 1 row, maximum 100 rows." | |
}, | |
"regen_strategy": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"run_all", | |
"run_before", | |
"run_selected", | |
"run_after" | |
], | |
"title": "RegenStrategy", | |
"description": "Strategies for selecting columns during row regeneration." | |
} | |
], | |
                    "description": "_Optional_. Strategy for selecting columns to regenerate. Choose `run_all` to regenerate all columns in the specified row; Choose `run_before` to regenerate columns up to the specified column_id; Choose `run_selected` to regenerate only the specified column_id; Choose `run_after` to regenerate columns starting from the specified column_id;", | |
"default": "run_all" | |
}, | |
"output_column_id": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Output Column Id", | |
                    "description": "_Optional_. Output column name to indicate the starting or ending point of regen for `run_before`, `run_selected` and `run_after` strategies. Required if `regen_strategy` is not 'run_all'. Given columns are 'C1', 'C2', 'C3' and 'C4', if column_id is 'C3': `run_before` regenerates columns 'C1', 'C2' and 'C3'; `run_selected` regenerates only column 'C3'; `run_after` regenerates columns 'C3' and 'C4';" | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "Whether or not to stream the LLM generation." | |
}, | |
"reindex": { | |
"anyOf": [ | |
{ | |
"type": "boolean" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reindex", | |
"description": "_Optional_. If True, reindex immediately. If False, wait until next periodic reindex. If None (default), reindex immediately for smaller tables." | |
}, | |
"concurrent": { | |
"type": "boolean", | |
"title": "Concurrent", | |
"description": "_Optional_. Whether or not to concurrently generate the output rows and columns.", | |
"default": true | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id", | |
"row_ids", | |
"stream" | |
], | |
"title": "RowRegenRequest" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": {} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/rows/update": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Update Row", | |
"operationId": "update_row", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Table Id", | |
"description": "Table name or ID." | |
}, | |
"row_id": { | |
"type": "string", | |
"title": "Row Id", | |
"description": "ID of the row to update." | |
}, | |
"data": { | |
"patternProperties": { | |
"^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$": {} | |
}, | |
"type": "object", | |
"title": "Data", | |
"description": "Mapping of column names to its value." | |
}, | |
"reindex": { | |
"anyOf": [ | |
{ | |
"type": "boolean" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reindex", | |
"description": "_Optional_. If True, reindex immediately. If False, wait until next periodic reindex. If None (default), reindex immediately for smaller tables." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id", | |
"row_id", | |
"data" | |
], | |
"title": "RowUpdateRequest" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"ok": { | |
"type": "boolean", | |
"title": "Ok", | |
"default": true | |
} | |
}, | |
"type": "object", | |
"title": "OkResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/rows/delete": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Delete Rows", | |
"operationId": "delete_rows", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Table Id", | |
"description": "Table name or ID." | |
}, | |
"row_ids": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array", | |
"maxItems": 100, | |
"minItems": 1 | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Row Ids", | |
"description": "List of ID of the row to delete. Minimum 1 row, maximum 100 rows." | |
}, | |
"where": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Where", | |
                    "description": "_Optional_. SQL where clause. If not provided, will match all rows, thus deleting all table content." | |
}, | |
"reindex": { | |
"anyOf": [ | |
{ | |
"type": "boolean" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reindex", | |
"description": "_Optional_. If True, reindex immediately. If False, wait until next periodic reindex. If None (default), reindex immediately for smaller tables." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RowDeleteRequest" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"ok": { | |
"type": "boolean", | |
"title": "Ok", | |
"default": true | |
} | |
}, | |
"type": "object", | |
"title": "OkResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/chat/{table_id}/thread": { | |
"get": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Get Conversation Thread", | |
"operationId": "get_conversation_thread", | |
"parameters": [ | |
{ | |
"name": "table_id", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table ID or name.", | |
"title": "Table Id" | |
}, | |
"description": "Table ID or name." | |
}, | |
{ | |
"name": "row_id", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "_Optional_. Row ID for filtering. Defaults to \"\" (export all rows).", | |
"default": "", | |
"title": "Row Id" | |
}, | |
"description": "_Optional_. Row ID for filtering. Defaults to \"\" (export all rows)." | |
}, | |
{ | |
"name": "include", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"type": "boolean", | |
"description": "_Optional_. Whether to include the row specified by `row_id`. Defaults to True.", | |
"default": true, | |
"title": "Include" | |
}, | |
"description": "_Optional_. Whether to include the row specified by `row_id`. Defaults to True." | |
} | |
], | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"object": { | |
"type": "string", | |
"title": "Object", | |
"description": "Type of API response object.", | |
"default": "chat.thread", | |
"examples": [ | |
"chat.thread" | |
] | |
}, | |
"thread": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"title": "Thread", | |
"description": "List of chat messages.", | |
"default": [], | |
"examples": [ | |
[ | |
{ | |
"content": "You are an assistant.", | |
"role": "system" | |
}, | |
{ | |
"content": "Hello.", | |
"role": "user" | |
} | |
] | |
] | |
} | |
}, | |
"type": "object", | |
"title": "ChatThread" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/hybrid_search": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Hybrid Search", | |
"operationId": "hybrid_search", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
}, | |
{ | |
"name": "openai-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "OpenAI API key.", | |
"default": "", | |
"title": "Openai-Api-Key" | |
}, | |
"description": "OpenAI API key." | |
}, | |
{ | |
"name": "anthropic-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Anthropic API key.", | |
"default": "", | |
"title": "Anthropic-Api-Key" | |
}, | |
"description": "Anthropic API key." | |
}, | |
{ | |
"name": "gemini-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Google Gemini API key.", | |
"default": "", | |
"title": "Gemini-Api-Key" | |
}, | |
"description": "Google Gemini API key." | |
}, | |
{ | |
"name": "cohere-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Cohere API key.", | |
"default": "", | |
"title": "Cohere-Api-Key" | |
}, | |
"description": "Cohere API key." | |
}, | |
{ | |
"name": "groq-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Groq API key.", | |
"default": "", | |
"title": "Groq-Api-Key" | |
}, | |
"description": "Groq API key." | |
}, | |
{ | |
"name": "together-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Together AI API key.", | |
"default": "", | |
"title": "Together-Api-Key" | |
}, | |
"description": "Together AI API key." | |
}, | |
{ | |
"name": "jina-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Jina API key.", | |
"default": "", | |
"title": "Jina-Api-Key" | |
}, | |
"description": "Jina API key." | |
}, | |
{ | |
"name": "voyage-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Voyage API key.", | |
"default": "", | |
"title": "Voyage-Api-Key" | |
}, | |
"description": "Voyage API key." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Table Id", | |
"description": "Table name or ID." | |
}, | |
"query": { | |
"type": "string", | |
"minLength": 1, | |
"title": "Query", | |
"description": "Query for full-text-search (FTS) and vector search. Must not be empty." | |
}, | |
"where": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Where", | |
"description": "_Optional_. SQL where clause. If not provided, will match all rows." | |
}, | |
"limit": { | |
"type": "integer", | |
"maximum": 1000, | |
"exclusiveMinimum": 0, | |
"title": "Limit", | |
"description": "_Optional_. Min 1, max 1000. Number of rows to return.", | |
"default": 100 | |
}, | |
"metric": { | |
"type": "string", | |
"title": "Metric", | |
"description": "_Optional_. Vector search similarity metric. Defaults to \"cosine\".", | |
"default": "cosine" | |
}, | |
"nprobes": { | |
"type": "integer", | |
"maximum": 1000, | |
"exclusiveMinimum": 0, | |
"title": "Nprobes", | |
"description": "_Optional_. Set the number of partitions to search (probe). This argument is only used when the vector column has an IVF PQ index. If there is no index then this value is ignored. The IVF stage of IVF PQ divides the input into partitions (clusters) of related values. The partition whose centroids are closest to the query vector will be exhaustively searched to find matches. This parameter controls how many partitions should be searched. Increasing this value will increase the recall of your query but will also increase the latency of your query. Defaults to 50.", | |
"default": 50 | |
}, | |
"refine_factor": { | |
"type": "integer", | |
"maximum": 1000, | |
"exclusiveMinimum": 0, | |
"title": "Refine Factor", | |
"description": "_Optional_. A multiplier to control how many additional rows are taken during the refine step. This argument is only used when the vector column has an IVF PQ index. If there is no index then this value is ignored. An IVF PQ index stores compressed (quantized) values. The query vector is compared against these values and, since they are compressed, the comparison is inaccurate. This parameter can be used to refine the results. It can both improve recall and correct the ordering of the nearest results. To refine results LanceDb will first perform an ANN search to find the nearest limit * refine_factor results. In other words, if refine_factor is 3 and limit is the default (10) then the first 30 results will be selected. LanceDb then fetches the full, uncompressed, values for these 30 results. The results are then reordered by the true distance and only the nearest 10 are kept. Defaults to 20.", | |
"default": 20 | |
}, | |
"float_decimals": { | |
"type": "integer", | |
"title": "Float Decimals", | |
"description": "_Optional_. Number of decimals for float values. Defaults to 0 (no rounding).", | |
"default": 0 | |
}, | |
"vec_decimals": { | |
"type": "integer", | |
"title": "Vec Decimals", | |
"description": "_Optional_. Number of decimals for vectors. If it is negative, exclude vector columns. Defaults to 0 (no rounding).", | |
"default": 0 | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id", | |
"query" | |
], | |
"title": "SearchRequest" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"type": "array", | |
"items": { | |
"type": "object", | |
"patternProperties": { | |
"^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$": {} | |
} | |
}, | |
"title": "Response Hybrid Search" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/knowledge/upload_file": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Upload File", | |
"operationId": "upload_file", | |
"parameters": [ | |
{ | |
"name": "openai-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "OpenAI API key.", | |
"default": "", | |
"title": "Openai-Api-Key" | |
}, | |
"description": "OpenAI API key." | |
}, | |
{ | |
"name": "anthropic-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Anthropic API key.", | |
"default": "", | |
"title": "Anthropic-Api-Key" | |
}, | |
"description": "Anthropic API key." | |
}, | |
{ | |
"name": "gemini-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Google Gemini API key.", | |
"default": "", | |
"title": "Gemini-Api-Key" | |
}, | |
"description": "Google Gemini API key." | |
}, | |
{ | |
"name": "cohere-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Cohere API key.", | |
"default": "", | |
"title": "Cohere-Api-Key" | |
}, | |
"description": "Cohere API key." | |
}, | |
{ | |
"name": "groq-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Groq API key.", | |
"default": "", | |
"title": "Groq-Api-Key" | |
}, | |
"description": "Groq API key." | |
}, | |
{ | |
"name": "together-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Together AI API key.", | |
"default": "", | |
"title": "Together-Api-Key" | |
}, | |
"description": "Together AI API key." | |
}, | |
{ | |
"name": "jina-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Jina API key.", | |
"default": "", | |
"title": "Jina-Api-Key" | |
}, | |
"description": "Jina API key." | |
}, | |
{ | |
"name": "voyage-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Voyage API key.", | |
"default": "", | |
"title": "Voyage-Api-Key" | |
}, | |
"description": "Voyage API key." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"multipart/form-data": { | |
"schema": { | |
"properties": { | |
"file": { | |
"type": "string", | |
"format": "binary", | |
"title": "File", | |
"description": "The file." | |
}, | |
"file_name": { | |
"type": "string", | |
"title": "File Name", | |
"description": "File name." | |
}, | |
"table_id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Table Id", | |
"description": "Knowledge Table ID." | |
}, | |
"chunk_size": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Chunk Size", | |
"description": "Maximum chunk size (number of characters). Must be > 0.", | |
"default": 1000 | |
}, | |
"chunk_overlap": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Chunk Overlap", | |
"description": "Overlap in characters between chunks. Must be >= 0.", | |
"default": 200 | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"file", | |
"file_name", | |
"table_id" | |
], | |
"title": "Body_upload_file" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"ok": { | |
"type": "boolean", | |
"title": "Ok", | |
"default": true | |
} | |
}, | |
"type": "object", | |
"title": "OkResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/import_data": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Import Table Data", | |
"operationId": "import_table_data", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
}, | |
{ | |
"name": "openai-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "OpenAI API key.", | |
"default": "", | |
"title": "Openai-Api-Key" | |
}, | |
"description": "OpenAI API key." | |
}, | |
{ | |
"name": "anthropic-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Anthropic API key.", | |
"default": "", | |
"title": "Anthropic-Api-Key" | |
}, | |
"description": "Anthropic API key." | |
}, | |
{ | |
"name": "gemini-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Google Gemini API key.", | |
"default": "", | |
"title": "Gemini-Api-Key" | |
}, | |
"description": "Google Gemini API key." | |
}, | |
{ | |
"name": "cohere-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Cohere API key.", | |
"default": "", | |
"title": "Cohere-Api-Key" | |
}, | |
"description": "Cohere API key." | |
}, | |
{ | |
"name": "groq-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Groq API key.", | |
"default": "", | |
"title": "Groq-Api-Key" | |
}, | |
"description": "Groq API key." | |
}, | |
{ | |
"name": "together-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Together AI API key.", | |
"default": "", | |
"title": "Together-Api-Key" | |
}, | |
"description": "Together AI API key." | |
}, | |
{ | |
"name": "jina-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Jina API key.", | |
"default": "", | |
"title": "Jina-Api-Key" | |
}, | |
"description": "Jina API key." | |
}, | |
{ | |
"name": "voyage-api-key", | |
"in": "header", | |
"required": false, | |
"schema": { | |
"type": "string", | |
"description": "Voyage API key.", | |
"default": "", | |
"title": "Voyage-Api-Key" | |
}, | |
"description": "Voyage API key." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"multipart/form-data": { | |
"schema": { | |
"properties": { | |
"file": { | |
"type": "string", | |
"format": "binary", | |
"title": "File", | |
"description": "The CSV or TSV file." | |
}, | |
"file_name": { | |
"type": "string", | |
"title": "File Name", | |
"description": "File name." | |
}, | |
"table_id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Table Id", | |
"description": "ID or name of the table that the data should be imported into." | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "Whether or not to stream the LLM generation.", | |
"default": true | |
}, | |
"delimiter": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
",", | |
"\t" | |
], | |
"title": "CSVDelimiter" | |
} | |
], | |
"description": "The delimiter, can be \",\" or \"\\t\". Defaults to \",\".", | |
"default": "," | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"file", | |
"file_name", | |
"table_id" | |
], | |
"title": "Body_import_table_data" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": {} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/{table_id}/export_data": { | |
"get": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Export Table Data", | |
"operationId": "export_table_data", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
}, | |
{ | |
"name": "table_id", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "ID or name of the table to be exported.", | |
"title": "Table Id" | |
}, | |
"description": "ID or name of the table to be exported." | |
}, | |
{ | |
"name": "delimiter", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
",", | |
"\t" | |
], | |
"title": "CSVDelimiter" | |
} | |
], | |
"description": "The delimiter, can be \",\" or \"\\t\". Defaults to \",\".", | |
"default": ",", | |
"title": "Delimiter" | |
}, | |
"description": "The delimiter, can be \",\" or \"\\t\". Defaults to \",\"." | |
}, | |
{ | |
"name": "columns", | |
"in": "query", | |
"required": false, | |
"schema": { | |
"anyOf": [ | |
{ | |
"type": "array", | |
"items": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$", | |
"description": "Column name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with an alphabet or number. Characters in the middle can include `_` (underscore), `-` (dash), ` ` (space). Cannot be called \"ID\" or \"Updated at\" (case-insensitive)." | |
} | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "_Optional_. A list of columns to be exported. Defaults to None (export all columns).", | |
"title": "Columns" | |
}, | |
"description": "_Optional_. A list of columns to be exported. Defaults to None (export all columns)." | |
} | |
], | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": {} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/import": { | |
"post": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Import Table", | |
"operationId": "import_table", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
} | |
], | |
"requestBody": { | |
"required": true, | |
"content": { | |
"multipart/form-data": { | |
"schema": { | |
"properties": { | |
"file": { | |
"type": "string", | |
"format": "binary", | |
"title": "File", | |
"description": "The parquet file." | |
}, | |
"table_id_dst": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Table Id Dst", | |
"description": "The ID or name of the new table." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"file", | |
"table_id_dst" | |
], | |
"title": "Body_import_table" | |
} | |
} | |
} | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"int8", | |
"float", | |
"float32", | |
"float16", | |
"bool", | |
"str", | |
"date-time" | |
], | |
"title": "DtypeEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"int8\", \"float\", \"float32\", \"float16\", \"bool\", \"str\", \"date-time\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchema" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
}, | |
"parent_id": { | |
"anyOf": [ | |
{ | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "Table name or ID. Must be unique with at least 1 character and up to 100 characters. Must start and end with an alphabet or number. Characters in the middle can include `_` (underscore), `-` (dash), `.` (dot)." | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Parent Id", | |
"description": "The parent table ID. If None (default), it means this is a template table." | |
}, | |
"title": { | |
"type": "string", | |
"title": "Title", | |
"description": "Chat title. Defaults to ''." | |
}, | |
"updated_at": { | |
"type": "string", | |
"title": "Updated At", | |
"description": "Table last update timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_fts": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Fts", | |
"description": "Table last FTS index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_vec": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Vec", | |
"description": "Table last vector index timestamp (ISO 8601 UTC)." | |
}, | |
"indexed_at_sca": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Indexed At Sca", | |
"description": "Table last scalar index timestamp (ISO 8601 UTC)." | |
}, | |
"num_rows": { | |
"type": "integer", | |
"title": "Num Rows", | |
"description": "Number of rows in the table." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols", | |
"parent_id", | |
"title", | |
"updated_at", | |
"indexed_at_fts", | |
"indexed_at_vec", | |
"indexed_at_sca", | |
"num_rows" | |
], | |
"title": "TableMetaResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/v1/gen_tables/{table_type}/{table_id}/export": { | |
"get": { | |
"tags": [ | |
"Generative Table" | |
], | |
"summary": "Export Table", | |
"operationId": "export_table", | |
"parameters": [ | |
{ | |
"name": "table_type", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"action", | |
"knowledge", | |
"chat" | |
], | |
"title": "TableType" | |
} | |
], | |
"description": "Table type.", | |
"title": "Table Type" | |
}, | |
"description": "Table type." | |
}, | |
{ | |
"name": "table_id", | |
"in": "path", | |
"required": true, | |
"schema": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"description": "ID or name of the table to be exported.", | |
"title": "Table Id" | |
}, | |
"description": "ID or name of the table to be exported." | |
} | |
], | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": {} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/org/v1/models": { | |
"get": { | |
"tags": [ | |
"Organization" | |
], | |
"summary": "Get Org Model Config", | |
"operationId": "get_org_model_config", | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"llm_models": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Unique identifier in the form of \"{provider}/{model_id}\". Users will specify this to select a model.", | |
"examples": [ | |
"openai/gpt-4o-mini" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"title": "Object", | |
"description": "Type of API response object.", | |
"default": "model", | |
"examples": [ | |
"model" | |
] | |
}, | |
"name": { | |
"type": "string", | |
"title": "Name", | |
"description": "Name of model.", | |
"examples": [ | |
"OpenAI GPT-4o Mini" | |
] | |
}, | |
"context_length": { | |
"type": "integer", | |
"title": "Context Length", | |
"description": "Context length of model.", | |
"examples": [ | |
16384 | |
] | |
}, | |
"languages": { | |
"items": { | |
"type": "string" | |
}, | |
"type": "array", | |
"title": "Languages", | |
"description": "List of languages which the model is well-versed in.", | |
"examples": [ | |
[ | |
"en" | |
] | |
] | |
}, | |
"owned_by": { | |
"type": "string", | |
"title": "Owned By", | |
"description": "The organization that owns the model. Defaults to the provider in model ID.", | |
"default": "", | |
"examples": [ | |
"openai" | |
] | |
}, | |
"capabilities": { | |
"items": { | |
"type": "string", | |
"enum": [ | |
"completion", | |
"chat", | |
"image", | |
"embed", | |
"rerank" | |
] | |
}, | |
"type": "array", | |
"title": "Capabilities", | |
"description": "List of capabilities of model.", | |
"examples": [ | |
[ | |
"chat" | |
] | |
] | |
}, | |
"internal_only": { | |
"type": "boolean", | |
"title": "Internal Only", | |
"description": "Whether this model is for internal use only.", | |
"default": false | |
}, | |
"litellm_id": { | |
"type": "string", | |
"title": "Litellm Id", | |
"description": "LiteLLM routing / mapping ID. For example, you can map \"openai/gpt-4o\" calls to \"openai/gpt-4o-2024-08-06\". For vLLM with OpenAI compatible server, use \"openai/<vllm_model_id>\".", | |
"default": "", | |
"examples": [ | |
"openai/gpt-4o-mini" | |
] | |
}, | |
"api_base": { | |
"type": "string", | |
"title": "Api Base", | |
"description": "Hosting URL for the model.", | |
"default": "" | |
}, | |
"priority": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Priority", | |
"description": "Priority when assigning default model. Larger number means higher priority.", | |
"default": 0 | |
}, | |
"input_cost_per_mtoken": { | |
"type": "number", | |
"title": "Input Cost Per Mtoken", | |
"description": "Cost in USD per million (mega) input / prompt token.", | |
"default": -1 | |
}, | |
"output_cost_per_mtoken": { | |
"type": "number", | |
"title": "Output Cost Per Mtoken", | |
"description": "Cost in USD per million (mega) output / completion token.", | |
"default": -1 | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"name", | |
"context_length", | |
"languages", | |
"capabilities" | |
], | |
"title": "LLMModelConfig" | |
}, | |
"type": "array", | |
"title": "Llm Models", | |
"default": [] | |
}, | |
"embed_models": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Unique identifier in the form of \"{provider}/{model_id}\". For self-hosted models with Infinity, use \"ellm/{org}/{model}\". Users will specify this to select a model.", | |
"examples": [ | |
"ellm/sentence-transformers/all-MiniLM-L6-v2", | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"title": "Object", | |
"description": "Type of API response object.", | |
"default": "model", | |
"examples": [ | |
"model" | |
] | |
}, | |
"name": { | |
"type": "string", | |
"title": "Name", | |
"description": "Name of model.", | |
"examples": [ | |
"OpenAI GPT-4o Mini" | |
] | |
}, | |
"context_length": { | |
"type": "integer", | |
"title": "Context Length", | |
"description": "Context length of model.", | |
"examples": [ | |
16384 | |
] | |
}, | |
"languages": { | |
"items": { | |
"type": "string" | |
}, | |
"type": "array", | |
"title": "Languages", | |
"description": "List of languages which the model is well-versed in.", | |
"examples": [ | |
[ | |
"en" | |
] | |
] | |
}, | |
"owned_by": { | |
"type": "string", | |
"title": "Owned By", | |
"description": "The organization that owns the model. Defaults to the provider in model ID.", | |
"default": "", | |
"examples": [ | |
"openai" | |
] | |
}, | |
"capabilities": { | |
"items": { | |
"type": "string", | |
"enum": [ | |
"completion", | |
"chat", | |
"image", | |
"embed", | |
"rerank" | |
] | |
}, | |
"type": "array", | |
"title": "Capabilities", | |
"description": "List of capabilities of model.", | |
"examples": [ | |
[ | |
"chat" | |
] | |
] | |
}, | |
"internal_only": { | |
"type": "boolean", | |
"title": "Internal Only", | |
"description": "Whether this model is for internal use only.", | |
"default": false | |
}, | |
"litellm_id": { | |
"type": "string", | |
"title": "Litellm Id", | |
"description": "LiteLLM routing / mapping ID. For example, you can map \"openai/gpt-4o\" calls to \"openai/gpt-4o-2024-08-06\". For vLLM with OpenAI compatible server, use \"openai/<vllm_model_id>\".", | |
"default": "", | |
"examples": [ | |
"openai/gpt-4o-mini" | |
] | |
}, | |
"api_base": { | |
"type": "string", | |
"title": "Api Base", | |
"description": "Hosting URL for the model.", | |
"default": "" | |
}, | |
"priority": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Priority", | |
"description": "Priority when assigning default model. Larger number means higher priority.", | |
"default": 0 | |
}, | |
"embedding_size": { | |
"type": "integer", | |
"title": "Embedding Size", | |
"description": "Embedding size of the model." | |
}, | |
"dimensions": { | |
"anyOf": [ | |
{ | |
"type": "integer" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Dimensions", | |
"description": "Dimensions, a reduced embedding size (OpenAI specs)." | |
}, | |
"transform_query": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Transform Query", | |
"description": "Transform query that might be needed, especially for Hugging Face models." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"name", | |
"context_length", | |
"languages", | |
"capabilities", | |
"embedding_size" | |
], | |
"title": "EmbeddingModelConfig" | |
}, | |
"type": "array", | |
"title": "Embed Models", | |
"default": [] | |
}, | |
"rerank_models": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Unique identifier in the form of \"{provider}/{model_id}\". For self-hosted models with Infinity, use \"ellm/{org}/{model}\". Users will specify this to select a model.", | |
"examples": [ | |
"ellm/cross-encoder/ms-marco-TinyBERT-L-2", | |
"cohere/rerank-multilingual-v3.0" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"title": "Object", | |
"description": "Type of API response object.", | |
"default": "model", | |
"examples": [ | |
"model" | |
] | |
}, | |
"name": { | |
"type": "string", | |
"title": "Name", | |
"description": "Name of model.", | |
"examples": [ | |
"OpenAI GPT-4o Mini" | |
] | |
}, | |
"context_length": { | |
"type": "integer", | |
"title": "Context Length", | |
"description": "Context length of model.", | |
"examples": [ | |
16384 | |
] | |
}, | |
"languages": { | |
"items": { | |
"type": "string" | |
}, | |
"type": "array", | |
"title": "Languages", | |
"description": "List of languages which the model is well-versed in.", | |
"examples": [ | |
[ | |
"en" | |
] | |
] | |
}, | |
"owned_by": { | |
"type": "string", | |
"title": "Owned By", | |
"description": "The organization that owns the model. Defaults to the provider in model ID.", | |
"default": "", | |
"examples": [ | |
"openai" | |
] | |
}, | |
"capabilities": { | |
"items": { | |
"type": "string", | |
"enum": [ | |
"completion", | |
"chat", | |
"image", | |
"embed", | |
"rerank" | |
] | |
}, | |
"type": "array", | |
"title": "Capabilities", | |
"description": "List of capabilities of model.", | |
"examples": [ | |
[ | |
"chat" | |
] | |
] | |
}, | |
"internal_only": { | |
"type": "boolean", | |
"title": "Internal Only", | |
"description": "Whether this model is for internal use only.", | |
"default": false | |
}, | |
"litellm_id": { | |
"type": "string", | |
"title": "Litellm Id", | |
"description": "LiteLLM routing / mapping ID. For example, you can map \"openai/gpt-4o\" calls to \"openai/gpt-4o-2024-08-06\". For vLLM with OpenAI compatible server, use \"openai/<vllm_model_id>\".", | |
"default": "", | |
"examples": [ | |
"openai/gpt-4o-mini" | |
] | |
}, | |
"api_base": { | |
"type": "string", | |
"title": "Api Base", | |
"description": "Hosting URL for the model.", | |
"default": "" | |
}, | |
"priority": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Priority", | |
"description": "Priority when assigning default model. Larger number means higher priority.", | |
"default": 0 | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"name", | |
"context_length", | |
"languages", | |
"capabilities" | |
], | |
"title": "RerankingModelConfig" | |
}, | |
"type": "array", | |
"title": "Rerank Models", | |
"default": [] | |
} | |
}, | |
"type": "object", | |
"title": "ModelListConfig" | |
} | |
} | |
} | |
} | |
} | |
}, | |
"patch": { | |
"tags": [ | |
"Organization" | |
], | |
"summary": "Set Org Model Config", | |
"operationId": "set_org_model_config", | |
"requestBody": { | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"llm_models": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Unique identifier in the form of \"{provider}/{model_id}\". Users will specify this to select a model.", | |
"examples": [ | |
"openai/gpt-4o-mini" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"title": "Object", | |
"description": "Type of API response object.", | |
"default": "model", | |
"examples": [ | |
"model" | |
] | |
}, | |
"name": { | |
"type": "string", | |
"title": "Name", | |
"description": "Name of model.", | |
"examples": [ | |
"OpenAI GPT-4o Mini" | |
] | |
}, | |
"context_length": { | |
"type": "integer", | |
"title": "Context Length", | |
"description": "Context length of model.", | |
"examples": [ | |
16384 | |
] | |
}, | |
"languages": { | |
"items": { | |
"type": "string" | |
}, | |
"type": "array", | |
"title": "Languages", | |
"description": "List of languages which the model is well-versed in.", | |
"examples": [ | |
[ | |
"en" | |
] | |
] | |
}, | |
"owned_by": { | |
"type": "string", | |
"title": "Owned By", | |
"description": "The organization that owns the model. Defaults to the provider in model ID.", | |
"default": "", | |
"examples": [ | |
"openai" | |
] | |
}, | |
"capabilities": { | |
"items": { | |
"type": "string", | |
"enum": [ | |
"completion", | |
"chat", | |
"image", | |
"embed", | |
"rerank" | |
] | |
}, | |
"type": "array", | |
"title": "Capabilities", | |
"description": "List of capabilities of model.", | |
"examples": [ | |
[ | |
"chat" | |
] | |
] | |
}, | |
"internal_only": { | |
"type": "boolean", | |
"title": "Internal Only", | |
"description": "Whether this model is for internal use only.", | |
"default": false | |
}, | |
"litellm_id": { | |
"type": "string", | |
"title": "Litellm Id", | |
"description": "LiteLLM routing / mapping ID. For example, you can map \"openai/gpt-4o\" calls to \"openai/gpt-4o-2024-08-06\". For vLLM with OpenAI compatible server, use \"openai/<vllm_model_id>\".", | |
"default": "", | |
"examples": [ | |
"openai/gpt-4o-mini" | |
] | |
}, | |
"api_base": { | |
"type": "string", | |
"title": "Api Base", | |
"description": "Hosting URL for the model.", | |
"default": "" | |
}, | |
"priority": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Priority", | |
"description": "Priority when assigning default model. Larger number means higher priority.", | |
"default": 0 | |
}, | |
"input_cost_per_mtoken": { | |
"type": "number", | |
"title": "Input Cost Per Mtoken", | |
"description": "Cost in USD per million (mega) input / prompt token.", | |
"default": -1 | |
}, | |
"output_cost_per_mtoken": { | |
"type": "number", | |
"title": "Output Cost Per Mtoken", | |
"description": "Cost in USD per million (mega) output / completion token.", | |
"default": -1 | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"name", | |
"context_length", | |
"languages", | |
"capabilities" | |
], | |
"title": "LLMModelConfig" | |
}, | |
"type": "array", | |
"title": "Llm Models", | |
"default": [] | |
}, | |
"embed_models": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Unique identifier in the form of \"{provider}/{model_id}\". For self-hosted models with Infinity, use \"ellm/{org}/{model}\". Users will specify this to select a model.", | |
"examples": [ | |
"ellm/sentence-transformers/all-MiniLM-L6-v2", | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"title": "Object", | |
"description": "Type of API response object.", | |
"default": "model", | |
"examples": [ | |
"model" | |
] | |
}, | |
"name": { | |
"type": "string", | |
"title": "Name", | |
"description": "Name of model.", | |
"examples": [ | |
"OpenAI GPT-4o Mini" | |
] | |
}, | |
"context_length": { | |
"type": "integer", | |
"title": "Context Length", | |
"description": "Context length of model.", | |
"examples": [ | |
16384 | |
] | |
}, | |
"languages": { | |
"items": { | |
"type": "string" | |
}, | |
"type": "array", | |
"title": "Languages", | |
"description": "List of languages which the model is well-versed in.", | |
"examples": [ | |
[ | |
"en" | |
] | |
] | |
}, | |
"owned_by": { | |
"type": "string", | |
"title": "Owned By", | |
"description": "The organization that owns the model. Defaults to the provider in model ID.", | |
"default": "", | |
"examples": [ | |
"openai" | |
] | |
}, | |
"capabilities": { | |
"items": { | |
"type": "string", | |
"enum": [ | |
"completion", | |
"chat", | |
"image", | |
"embed", | |
"rerank" | |
] | |
}, | |
"type": "array", | |
"title": "Capabilities", | |
"description": "List of capabilities of model.", | |
"examples": [ | |
[ | |
"chat" | |
] | |
] | |
}, | |
"internal_only": { | |
"type": "boolean", | |
"title": "Internal Only", | |
"description": "Whether this model is for internal use only.", | |
"default": false | |
}, | |
"litellm_id": { | |
"type": "string", | |
"title": "Litellm Id", | |
"description": "LiteLLM routing / mapping ID. For example, you can map \"openai/gpt-4o\" calls to \"openai/gpt-4o-2024-08-06\". For vLLM with OpenAI compatible server, use \"openai/<vllm_model_id>\".", | |
"default": "", | |
"examples": [ | |
"openai/gpt-4o-mini" | |
] | |
}, | |
"api_base": { | |
"type": "string", | |
"title": "Api Base", | |
"description": "Hosting URL for the model.", | |
"default": "" | |
}, | |
"priority": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Priority", | |
"description": "Priority when assigning default model. Larger number means higher priority.", | |
"default": 0 | |
}, | |
"embedding_size": { | |
"type": "integer", | |
"title": "Embedding Size", | |
"description": "Embedding size of the model." | |
}, | |
"dimensions": { | |
"anyOf": [ | |
{ | |
"type": "integer" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Dimensions", | |
"description": "Dimensions, a reduced embedding size (OpenAI specs)." | |
}, | |
"transform_query": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Transform Query", | |
"description": "Transform query that might be needed, especially for Hugging Face models." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"name", | |
"context_length", | |
"languages", | |
"capabilities", | |
"embedding_size" | |
], | |
"title": "EmbeddingModelConfig" | |
}, | |
"type": "array", | |
"title": "Embed Models", | |
"default": [] | |
}, | |
"rerank_models": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Unique identifier in the form of \"{provider}/{model_id}\". For self-hosted models with Infinity, use \"ellm/{org}/{model}\". Users will specify this to select a model.", | |
"examples": [ | |
"ellm/cross-encoder/ms-marco-TinyBERT-L-2", | |
"cohere/rerank-multilingual-v3.0" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"title": "Object", | |
"description": "Type of API response object.", | |
"default": "model", | |
"examples": [ | |
"model" | |
] | |
}, | |
"name": { | |
"type": "string", | |
"title": "Name", | |
"description": "Name of model.", | |
"examples": [ | |
"OpenAI GPT-4o Mini" | |
] | |
}, | |
"context_length": { | |
"type": "integer", | |
"title": "Context Length", | |
"description": "Context length of model.", | |
"examples": [ | |
16384 | |
] | |
}, | |
"languages": { | |
"items": { | |
"type": "string" | |
}, | |
"type": "array", | |
"title": "Languages", | |
"description": "List of languages which the model is well-versed in.", | |
"examples": [ | |
[ | |
"en" | |
] | |
] | |
}, | |
"owned_by": { | |
"type": "string", | |
"title": "Owned By", | |
"description": "The organization that owns the model. Defaults to the provider in model ID.", | |
"default": "", | |
"examples": [ | |
"openai" | |
] | |
}, | |
"capabilities": { | |
"items": { | |
"type": "string", | |
"enum": [ | |
"completion", | |
"chat", | |
"image", | |
"embed", | |
"rerank" | |
] | |
}, | |
"type": "array", | |
"title": "Capabilities", | |
"description": "List of capabilities of model.", | |
"examples": [ | |
[ | |
"chat" | |
] | |
] | |
}, | |
"internal_only": { | |
"type": "boolean", | |
"title": "Internal Only", | |
"description": "Whether this model is for internal use only.", | |
"default": false | |
}, | |
"litellm_id": { | |
"type": "string", | |
"title": "Litellm Id", | |
"description": "LiteLLM routing / mapping ID. For example, you can map \"openai/gpt-4o\" calls to \"openai/gpt-4o-2024-08-06\". For vLLM with OpenAI compatible server, use \"openai/<vllm_model_id>\".", | |
"default": "", | |
"examples": [ | |
"openai/gpt-4o-mini" | |
] | |
}, | |
"api_base": { | |
"type": "string", | |
"title": "Api Base", | |
"description": "Hosting URL for the model.", | |
"default": "" | |
}, | |
"priority": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Priority", | |
"description": "Priority when assigning default model. Larger number means higher priority.", | |
"default": 0 | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"name", | |
"context_length", | |
"languages", | |
"capabilities" | |
], | |
"title": "RerankingModelConfig" | |
}, | |
"type": "array", | |
"title": "Rerank Models", | |
"default": [] | |
} | |
}, | |
"type": "object", | |
"title": "ModelListConfig" | |
} | |
} | |
}, | |
"required": true | |
}, | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"ok": { | |
"type": "boolean", | |
"title": "Ok", | |
"default": true | |
} | |
}, | |
"type": "object", | |
"title": "OkResponse" | |
} | |
} | |
} | |
}, | |
"422": { | |
"description": "Validation Error", | |
"content": { | |
"application/json": { | |
"schema": { | |
"properties": { | |
"detail": { | |
"items": { | |
"properties": { | |
"loc": { | |
"items": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "integer" | |
} | |
] | |
}, | |
"type": "array", | |
"title": "Location" | |
}, | |
"msg": { | |
"type": "string", | |
"title": "Message" | |
}, | |
"type": { | |
"type": "string", | |
"title": "Error Type" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"loc", | |
"msg", | |
"type" | |
], | |
"title": "ValidationError" | |
}, | |
"type": "array", | |
"title": "Detail" | |
} | |
}, | |
"type": "object", | |
"title": "HTTPValidationError" | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"/api/health": { | |
"get": { | |
"tags": [ | |
"api" | |
], | |
"summary": "Health", | |
"description": "Health check.", | |
"operationId": "health_api_health_get", | |
"responses": { | |
"200": { | |
"description": "Successful Response", | |
"content": { | |
"application/json": { | |
"schema": {} | |
} | |
} | |
} | |
} | |
} | |
} | |
}, | |
"components": { | |
"schemas": { | |
"ActionTableSchemaCreate": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"float", | |
"bool", | |
"str" | |
], | |
"title": "DtypeCreateEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"float\", \"bool\", \"str\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concat title and content as reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchemaCreate" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols" | |
], | |
"title": "ActionTableSchemaCreate" | |
}, | |
"AddActionColumnSchema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"float", | |
"bool", | |
"str" | |
], | |
"title": "DtypeCreateEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"float\", \"bool\", \"str\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using an LLM.",
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concatenate title and content as reranker input. Defaults to False.",
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchemaCreate" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols" | |
], | |
"title": "AddActionColumnSchema" | |
}, | |
"AddChatColumnSchema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"float", | |
"bool", | |
"str" | |
], | |
"title": "DtypeCreateEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"float\", \"bool\", \"str\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using an LLM.",
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concatenate title and content as reranker input. Defaults to False.",
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchemaCreate" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols" | |
], | |
"title": "AddChatColumnSchema" | |
}, | |
"AddKnowledgeColumnSchema": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"float", | |
"bool", | |
"str" | |
], | |
"title": "DtypeCreateEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"float\", \"bool\", \"str\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using an LLM.",
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concatenate title and content as reranker input. Defaults to False.",
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
{ | |
"properties": { | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.embed" | |
], | |
"const": "gen_config.embed", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.embed\".", | |
"default": "gen_config.embed", | |
"examples": [ | |
"gen_config.embed" | |
] | |
}, | |
"embedding_model": { | |
"type": "string", | |
"title": "Embedding Model", | |
"description": "The embedding model to use.", | |
"examples": [ | |
"openai/text-embedding-3-small-512" | |
] | |
}, | |
"source_column": { | |
"type": "string", | |
"title": "Source Column", | |
"description": "The source column for embedding.", | |
"examples": [ | |
"text_column" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"embedding_model", | |
"source_column" | |
], | |
"title": "EmbedGenConfig" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Gen Config", | |
"description": "_Optional_. Generation config. If provided, then this column will be an \"Output Column\". Table columns on its left can be referenced by `${column-name}`." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id" | |
], | |
"title": "ColumnSchemaCreate" | |
}, | |
"type": "array", | |
"title": "Cols", | |
"description": "List of column schema." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"id", | |
"cols" | |
], | |
"title": "AddKnowledgeColumnSchema" | |
}, | |
"Body_import_table": { | |
"properties": { | |
"file": { | |
"type": "string", | |
"format": "binary", | |
"title": "File", | |
"description": "The parquet file." | |
}, | |
"table_id_dst": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Table Id Dst", | |
"description": "The ID or name of the new table." | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"file", | |
"table_id_dst" | |
], | |
"title": "Body_import_table" | |
}, | |
"Body_import_table_data": { | |
"properties": { | |
"file": { | |
"type": "string", | |
"format": "binary", | |
"title": "File", | |
"description": "The CSV or TSV file." | |
}, | |
"file_name": { | |
"type": "string", | |
"title": "File Name", | |
"description": "File name." | |
}, | |
"table_id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Table Id", | |
"description": "ID or name of the table that the data should be imported into." | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "Whether or not to stream the LLM generation.", | |
"default": true | |
}, | |
"delimiter": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
",", | |
"\t" | |
], | |
"title": "CSVDelimiter" | |
} | |
], | |
"description": "The delimiter; can be \",\" or \"\\t\". Defaults to \",\".",
"default": "," | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"file", | |
"file_name", | |
"table_id" | |
], | |
"title": "Body_import_table_data" | |
}, | |
"Body_upload_file": { | |
"properties": { | |
"file": { | |
"type": "string", | |
"format": "binary", | |
"title": "File", | |
"description": "The file." | |
}, | |
"file_name": { | |
"type": "string", | |
"title": "File Name", | |
"description": "File name." | |
}, | |
"table_id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Table Id", | |
"description": "Knowledge Table ID." | |
}, | |
"chunk_size": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Chunk Size", | |
"description": "Maximum chunk size (number of characters). Must be > 0.", | |
"default": 1000 | |
}, | |
"chunk_overlap": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Chunk Overlap", | |
"description": "Overlap in characters between chunks. Must be >= 0.", | |
"default": 200 | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"file", | |
"file_name", | |
"table_id" | |
], | |
"title": "Body_upload_file" | |
}, | |
"CSVDelimiter": { | |
"type": "string", | |
"enum": [ | |
",", | |
"\t" | |
], | |
"title": "CSVDelimiter" | |
}, | |
"ChatEntry": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"ChatGenConfig-Input": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using an LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concatenate the title and content as the reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
"ChatGenConfig-Output": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using an LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concatenate the title and content as the reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
}, | |
"object": { | |
"type": "string", | |
"enum": [ | |
"gen_config.chat" | |
], | |
"const": "gen_config.chat", | |
"title": "Object", | |
"description": "The object type, which is always \"gen_config.chat\".", | |
"default": "gen_config.chat", | |
"examples": [ | |
"gen_config.chat" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatGenConfig" | |
}, | |
"ChatRequest": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"content": { | |
"type": "string", | |
"title": "Content" | |
}, | |
"name": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Name" | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"role", | |
"content" | |
], | |
"title": "ChatEntry", | |
"description": "Represents a message in the chat context." | |
}, | |
"type": "array", | |
"minItems": 1, | |
"title": "Messages", | |
"description": "A list of messages comprising the conversation so far." | |
}, | |
"rag_params": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"table_id": { | |
"type": "string", | |
"minLength": 2, | |
"title": "Table Id", | |
"description": "Knowledge Table ID", | |
"examples": [ | |
"my-dataset" | |
] | |
}, | |
"reranking_model": { | |
"anyOf": [ | |
{ | |
"type": "string" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Reranking Model", | |
"description": "Reranking model to use for hybrid search.", | |
"examples": [ | |
"cohere/rerank-multilingual-v3.0", | |
null | |
] | |
}, | |
"search_query": { | |
"type": "string", | |
"title": "Search Query", | |
"description": "Query used to retrieve items from the KB database. If not provided (default), it will be generated using an LLM.", | |
"default": "" | |
}, | |
"k": { | |
"type": "integer", | |
"maximum": 1024, | |
"exclusiveMinimum": 0, | |
"title": "K", | |
"description": "Top-k closest text in terms of embedding distance. Must be in [1, 1024]. Defaults to 3.", | |
"default": 3, | |
"examples": [ | |
3 | |
] | |
}, | |
"rerank": { | |
"type": "boolean", | |
"title": "Rerank", | |
"description": "Flag to perform rerank on the retrieved results. Defaults to True.", | |
"default": true, | |
"examples": [ | |
true, | |
false | |
] | |
}, | |
"concat_reranker_input": { | |
"type": "boolean", | |
"title": "Concat Reranker Input", | |
"description": "Flag to concatenate the title and content as the reranker input. Defaults to False.", | |
"default": false, | |
"examples": [ | |
true, | |
false | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"table_id" | |
], | |
"title": "RAGParams" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"description": "Retrieval Augmented Generation search params. Defaults to None (disabled).", | |
"examples": [ | |
null | |
] | |
}, | |
"temperature": { | |
"type": "number", | |
"maximum": 2, | |
"minimum": 0.001, | |
"title": "Temperature", | |
"description": " What sampling temperature to use, in [0.001, 2.0].\n Higher values like 0.8 will make the output more random,\n while lower values like 0.2 will make it more focused and deterministic.\n", | |
"default": 0.2, | |
"examples": [ | |
0.2 | |
] | |
}, | |
"top_p": { | |
"type": "number", | |
"maximum": 1, | |
"minimum": 0.001, | |
"title": "Top P", | |
"description": " An alternative to sampling with temperature, called nucleus sampling,\n where the model considers the results of the tokens with top_p probability mass.\n So 0.1 means only the tokens comprising the top 10% probability mass are considered.\n Must be in [0.001, 1.0].\n", | |
"default": 0.6, | |
"examples": [ | |
0.6 | |
] | |
}, | |
"n": { | |
"type": "integer", | |
"title": "N", | |
"description": "How many chat completion choices to generate for each input message.", | |
"default": 1, | |
"examples": [ | |
1 | |
] | |
}, | |
"stream": { | |
"type": "boolean", | |
"title": "Stream", | |
"description": "\n If set, partial message deltas will be sent, like in ChatGPT.\n Tokens will be sent as server-sent events as they become available,\n with the stream terminated by a 'data: [DONE]' message.\n", | |
"default": true, | |
"examples": [ | |
true | |
] | |
}, | |
"stop": { | |
"anyOf": [ | |
{ | |
"items": { | |
"type": "string" | |
}, | |
"type": "array" | |
}, | |
{ | |
"type": "null" | |
} | |
], | |
"title": "Stop", | |
"description": "Up to 4 sequences where the API will stop generating further tokens.", | |
"examples": [ | |
null | |
] | |
}, | |
"max_tokens": { | |
"type": "integer", | |
"exclusiveMinimum": 0, | |
"title": "Max Tokens", | |
"description": " The maximum number of tokens to generate in the chat completion.\n Must be in [1, context_length - 1). Default is 2048.\n The total length of input tokens and generated tokens is limited by the model's context length.\n", | |
"default": 2048, | |
"examples": [ | |
2048 | |
] | |
}, | |
"presence_penalty": { | |
"type": "number", | |
"title": "Presence Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far,\n increasing the model's likelihood to talk about new topics.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"frequency_penalty": { | |
"type": "number", | |
"title": "Frequency Penalty", | |
"description": " Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n", | |
"default": 0, | |
"examples": [ | |
0 | |
] | |
}, | |
"logit_bias": { | |
"type": "object", | |
"title": "Logit Bias", | |
"description": " Modify the likelihood of specified tokens appearing in the completion.\n Accepts a json object that maps tokens (specified by their token ID in the tokenizer)\n to an associated bias value from -100 to 100.\n Mathematically, the bias is added to the logits generated by the model prior to sampling.\n The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection;\n values like -100 or 100 should result in a ban or exclusive selection of the relevant token.\n", | |
"default": {}, | |
"examples": [ | |
{} | |
] | |
}, | |
"user": { | |
"type": "string", | |
"title": "User", | |
"description": "A unique identifier representing your end-user. For monitoring and debugging purposes.", | |
"default": "", | |
"examples": [ | |
"" | |
] | |
} | |
}, | |
"type": "object", | |
"required": [ | |
"messages" | |
], | |
"title": "ChatRequest" | |
}, | |
"ChatRole": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
"assistant" | |
], | |
"title": "ChatRole", | |
"description": "Represents who said a chat message." | |
}, | |
"ChatTableSchemaCreate": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9._-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Table name." | |
}, | |
"version": { | |
"type": "string", | |
"title": "Version", | |
"description": "Table version, following owl version.", | |
"default": "0.3.0" | |
}, | |
"meta": { | |
"type": "object", | |
"title": "Meta", | |
"description": "Additional metadata about the table.", | |
"default": {} | |
}, | |
"cols": { | |
"items": { | |
"properties": { | |
"id": { | |
"type": "string", | |
"pattern": "^[A-Za-z0-9]([A-Za-z0-9 _-]{0,98}[A-Za-z0-9])?$", | |
"title": "Id", | |
"description": "Column name." | |
}, | |
"dtype": { | |
"allOf": [ | |
{ | |
"type": "string", | |
"enum": [ | |
"int", | |
"float", | |
"bool", | |
"str" | |
], | |
"title": "DtypeCreateEnum" | |
} | |
], | |
"description": "Column data type, one of [\"int\", \"float\", \"bool\", \"str\"]", | |
"default": "str" | |
}, | |
"vlen": { | |
"type": "integer", | |
"minimum": 0, | |
"title": "Vlen", | |
"description": "_Optional_. Vector length. If this is larger than zero, then `dtype` must be one of the floating data types. Defaults to zero.", | |
"default": 0 | |
}, | |
"index": { | |
"type": "boolean", | |
"title": "Index", | |
"description": "_Optional_. Whether to build full-text-search (FTS) or vector index for this column. Only applies to string and vector columns. Defaults to True.", | |
"default": true | |
}, | |
"gen_config": { | |
"anyOf": [ | |
{ | |
"properties": { | |
"id": { | |
"type": "string", | |
"title": "Id", | |
"description": "Chat ID. Must be unique against document ID for it to be embeddable. Defaults to ''.", | |
"default": "" | |
}, | |
"model": { | |
"type": "string", | |
"title": "Model", | |
"description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", | |
"default": "" | |
}, | |
"messages": { | |
"items": { | |
"properties": { | |
"role": { | |
"type": "string", | |
"enum": [ | |
"system", | |
"user", | |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment