generativelanguage API generated from the protos
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by sidekick. DO NOT EDIT.
/// The Google Cloud client for the Generative Language API.
///
/// The Gemini API allows developers to build generative AI applications using
/// Gemini models. Gemini is our most capable model, built from the ground up
/// to be multimodal. It can generalize and seamlessly understand, operate
/// across, and combine different types of information including language,
/// images, audio, video, and code. You can use the Gemini API for use cases
/// like reasoning across text and images, content generation, dialogue
/// agents, summarization and classification systems, and more.
library;

import 'package:google_cloud_gax/gax.dart';
import 'package:google_cloud_gax/src/encoding.dart';
import 'package:google_cloud_longrunning/longrunning.dart';
import 'package:google_cloud_protobuf/protobuf.dart';
import 'package:google_cloud_rpc/rpc.dart';
import 'package:http/http.dart' as http;
/// API for using Large Models that generate multimodal content and have
/// additional capabilities beyond text generation.
class GenerativeService {
  static const String _host = 'generativelanguage.googleapis.com';

  final ServiceClient _client;

  GenerativeService({required http.Client client})
      : _client = ServiceClient(client: client);

  /// Generates a model response given an input `GenerateContentRequest`.
  /// Refer to the [text generation
  /// guide](https://ai.google.dev/gemini-api/docs/text-generation) for detailed
  /// usage information. Input capabilities differ between models, including
  /// tuned models. Refer to the [model
  /// guide](https://ai.google.dev/gemini-api/docs/models/gemini) and [tuning
  /// guide](https://ai.google.dev/gemini-api/docs/model-tuning) for details.
  Future<GenerateContentResponse> generateContent(
      GenerateContentRequest request) async {
    final url = Uri.https(_host, '/v1/${request.model}:generateContent');
    final response = await _client.post(url, body: request);
    return GenerateContentResponse.fromJson(response);
  }

  /// Generates a text embedding vector from the input `Content` using the
  /// specified [Gemini Embedding
  /// model](https://ai.google.dev/gemini-api/docs/models/gemini#text-embedding).
  Future<EmbedContentResponse> embedContent(EmbedContentRequest request) async {
    final url = Uri.https(_host, '/v1/${request.model}:embedContent');
    final response = await _client.post(url, body: request);
    return EmbedContentResponse.fromJson(response);
  }

  /// Generates multiple embedding vectors from the input `Content`, which
  /// consists of a batch of strings represented as `EmbedContentRequest`
  /// objects.
  Future<BatchEmbedContentsResponse> batchEmbedContents(
      BatchEmbedContentsRequest request) async {
    final url = Uri.https(_host, '/v1/${request.model}:batchEmbedContents');
    final response = await _client.post(url, body: request);
    return BatchEmbedContentsResponse.fromJson(response);
  }

  /// Runs a model's tokenizer on input `Content` and returns the token count.
  /// Refer to the [tokens guide](https://ai.google.dev/gemini-api/docs/tokens)
  /// to learn more about tokens.
  Future<CountTokensResponse> countTokens(CountTokensRequest request) async {
    final url = Uri.https(_host, '/v1/${request.model}:countTokens');
    final response = await _client.post(url, body: request);
    return CountTokensResponse.fromJson(response);
  }

  /// Provides the `Operations` service functionality in this service.
  Future<ListOperationsResponse> listOperations(
      ListOperationsRequest request) async {
    final url = Uri.https(_host, '/v1/${request.name}', {
      if (request.filter != null) 'filter': request.filter!,
      if (request.pageSize != null) 'pageSize': '${request.pageSize}',
      if (request.pageToken != null) 'pageToken': request.pageToken!,
    });
    final response = await _client.get(url);
    return ListOperationsResponse.fromJson(response);
  }

  /// Provides the `Operations` service functionality in this service.
  ///
  /// This method can be used to get the current status of a long-running
  /// operation.
  Future<Operation<T, S>>
      getOperation<T extends ProtoMessage, S extends ProtoMessage>(
          Operation<T, S> request) async {
    final url = Uri.https(_host, '/v1/${request.name}');
    final response = await _client.get(url);
    return Operation.fromJson(response, request.operationHelper);
  }

  /// Provides the `Operations` service functionality in this service.
  Future<void> cancelOperation(CancelOperationRequest request) async {
    final url = Uri.https(_host, '/v1/${request.name}:cancel');
    await _client.post(url, body: request);
  }

  /// Closes the client and cleans up any resources associated with it.
  ///
  /// Once [close] is called, no other methods should be called.
  void close() => _client.close();
}
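
// A minimal usage sketch (example only, not generated code). It assumes the
// supplied `http.Client` is already configured with credentials (for example,
// an API key); authentication is not handled by this generated file, and the
// model name below is illustrative.
Future<void> exampleGenerateContent(http.Client httpClient) async {
  final service = GenerativeService(client: httpClient);
  final response = await service.generateContent(GenerateContentRequest(
    model: 'models/gemini-2.0-flash', // hypothetical model name
    contents: [
      Content(role: 'user', parts: [Part(text: 'Say hello in Dart.')]),
    ],
  ));
  // Print the text parts of the first candidate, if any.
  final candidates = response.candidates ?? const <Candidate>[];
  if (candidates.isNotEmpty) {
    print(candidates.first.content?.parts?.map((p) => p.text ?? '').join());
  }
  service.close();
}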
/// Provides methods for getting metadata information about Generative Models.
class ModelService {
  static const String _host = 'generativelanguage.googleapis.com';

  final ServiceClient _client;

  ModelService({required http.Client client})
      : _client = ServiceClient(client: client);

  /// Gets information about a specific `Model` such as its version number,
  /// token limits,
  /// [parameters](https://ai.google.dev/gemini-api/docs/models/generative-models#model-parameters)
  /// and other metadata. Refer to the [Gemini models
  /// guide](https://ai.google.dev/gemini-api/docs/models/gemini) for detailed
  /// model information.
  Future<Model> getModel(GetModelRequest request) async {
    final url = Uri.https(_host, '/v1/${request.name}');
    final response = await _client.get(url);
    return Model.fromJson(response);
  }

  /// Lists the [`Model`s](https://ai.google.dev/gemini-api/docs/models/gemini)
  /// available through the Gemini API.
  Future<ListModelsResponse> listModels(ListModelsRequest request) async {
    final url = Uri.https(_host, '/v1/models', {
      if (request.pageSize != null) 'pageSize': '${request.pageSize}',
      if (request.pageToken != null) 'pageToken': request.pageToken!,
    });
    final response = await _client.get(url);
    return ListModelsResponse.fromJson(response);
  }

  /// Provides the `Operations` service functionality in this service.
  Future<ListOperationsResponse> listOperations(
      ListOperationsRequest request) async {
    final url = Uri.https(_host, '/v1/${request.name}', {
      if (request.filter != null) 'filter': request.filter!,
      if (request.pageSize != null) 'pageSize': '${request.pageSize}',
      if (request.pageToken != null) 'pageToken': request.pageToken!,
    });
    final response = await _client.get(url);
    return ListOperationsResponse.fromJson(response);
  }

  /// Provides the `Operations` service functionality in this service.
  ///
  /// This method can be used to get the current status of a long-running
  /// operation.
  Future<Operation<T, S>>
      getOperation<T extends ProtoMessage, S extends ProtoMessage>(
          Operation<T, S> request) async {
    final url = Uri.https(_host, '/v1/${request.name}');
    final response = await _client.get(url);
    return Operation.fromJson(response, request.operationHelper);
  }

  /// Provides the `Operations` service functionality in this service.
  Future<void> cancelOperation(CancelOperationRequest request) async {
    final url = Uri.https(_host, '/v1/${request.name}:cancel');
    await _client.post(url, body: request);
  }

  /// Closes the client and cleans up any resources associated with it.
  ///
  /// Once [close] is called, no other methods should be called.
  void close() => _client.close();
}
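
// Example sketch: fetching metadata for a single model. `GetModelRequest` is
// defined elsewhere in this file; its `name` parameter is assumed here to
// match the `request.name` used to build the URL in `getModel` above, and the
// model name itself is illustrative.
Future<void> exampleGetModel(http.Client httpClient) async {
  final service = ModelService(client: httpClient);
  final model =
      await service.getModel(GetModelRequest(name: 'models/gemini-2.0-flash'));
  print(model); // Model.toString() summarizes the populated fields.
  service.close();
}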
/// A collection of source attributions for a piece of content.
class CitationMetadata extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.CitationMetadata';

  /// Citations to sources for a specific response.
  final List<CitationSource>? citationSources;

  CitationMetadata({
    this.citationSources,
  }) : super(fullyQualifiedName);

  factory CitationMetadata.fromJson(Map<String, dynamic> json) {
    return CitationMetadata(
      citationSources:
          decodeListMessage(json['citationSources'], CitationSource.fromJson),
    );
  }

  @override
  Object toJson() {
    return {
      if (citationSources != null)
        'citationSources': encodeList(citationSources),
    };
  }

  @override
  String toString() => 'CitationMetadata()';
}

/// A citation to a source for a portion of a specific response.
class CitationSource extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.CitationSource';

  /// Optional. Start of segment of the response that is attributed to this
  /// source.
  ///
  /// Index indicates the start of the segment, measured in bytes.
  final int? startIndex;

  /// Optional. End of the attributed segment, exclusive.
  final int? endIndex;

  /// Optional. URI that is attributed as a source for a portion of the text.
  final String? uri;

  /// Optional. License for the GitHub project that is attributed as a source
  /// for the segment.
  ///
  /// License info is required for code citations.
  final String? license;

  CitationSource({
    this.startIndex,
    this.endIndex,
    this.uri,
    this.license,
  }) : super(fullyQualifiedName);

  factory CitationSource.fromJson(Map<String, dynamic> json) {
    return CitationSource(
      startIndex: json['startIndex'],
      endIndex: json['endIndex'],
      uri: json['uri'],
      license: json['license'],
    );
  }

  @override
  Object toJson() {
    return {
      if (startIndex != null) 'startIndex': startIndex,
      if (endIndex != null) 'endIndex': endIndex,
      if (uri != null) 'uri': uri,
      if (license != null) 'license': license,
    };
  }

  @override
  String toString() {
    final contents = [
      if (startIndex != null) 'startIndex=$startIndex',
      if (endIndex != null) 'endIndex=$endIndex',
      if (uri != null) 'uri=$uri',
      if (license != null) 'license=$license',
    ].join(',');
    return 'CitationSource($contents)';
  }
}

/// The base structured datatype containing multi-part content of a message.
///
/// A `Content` includes a `role` field designating the producer of the
/// `Content` and a `parts` field containing multi-part data that contains the
/// content of the message turn.
class Content extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.Content';

  /// Ordered `Parts` that constitute a single message. Parts may have
  /// different MIME types.
  final List<Part>? parts;

  /// Optional. The producer of the content. Must be either 'user' or 'model'.
  ///
  /// Useful to set for multi-turn conversations, otherwise can be left blank
  /// or unset.
  final String? role;

  Content({
    this.parts,
    this.role,
  }) : super(fullyQualifiedName);

  factory Content.fromJson(Map<String, dynamic> json) {
    return Content(
      parts: decodeListMessage(json['parts'], Part.fromJson),
      role: json['role'],
    );
  }

  @override
  Object toJson() {
    return {
      if (parts != null) 'parts': encodeList(parts),
      if (role != null) 'role': role,
    };
  }

  @override
  String toString() {
    final contents = [
      if (role != null) 'role=$role',
    ].join(',');
    return 'Content($contents)';
  }
}
/// A datatype containing media that is part of a multi-part `Content`
/// message.
///
/// A `Part` consists of data which has an associated datatype. A `Part` can
/// only contain one of the accepted types in `Part.data`.
///
/// A `Part` must have a fixed IANA MIME type identifying the type and subtype
/// of the media if the `inline_data` field is filled with raw bytes.
class Part extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.Part';

  /// Inline text.
  final String? text;

  /// Inline media bytes.
  final Blob? inlineData;

  Part({
    this.text,
    this.inlineData,
  }) : super(fullyQualifiedName);

  factory Part.fromJson(Map<String, dynamic> json) {
    return Part(
      text: json['text'],
      inlineData: decode(json['inlineData'], Blob.fromJson),
    );
  }

  @override
  Object toJson() {
    return {
      if (text != null) 'text': text,
      if (inlineData != null) 'inlineData': inlineData!.toJson(),
    };
  }

  @override
  String toString() {
    final contents = [
      if (text != null) 'text=$text',
    ].join(',');
    return 'Part($contents)';
  }
}

/// Raw media bytes.
///
/// Text should not be sent as raw bytes; use the 'text' field.
class Blob extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.Blob';

  /// The IANA standard MIME type of the source data.
  /// Examples:
  /// - image/png
  /// - image/jpeg
  ///
  /// If an unsupported MIME type is provided, an error will be returned. For a
  /// complete list of supported types, see [Supported file
  /// formats](https://ai.google.dev/gemini-api/docs/prompting_with_media#supported_file_formats).
  final String? mimeType;

  /// Raw bytes for media formats.
  final Uint8List? data;

  Blob({
    this.mimeType,
    this.data,
  }) : super(fullyQualifiedName);

  factory Blob.fromJson(Map<String, dynamic> json) {
    return Blob(
      mimeType: json['mimeType'],
      data: decodeBytes(json['data']),
    );
  }

  @override
  Object toJson() {
    return {
      if (mimeType != null) 'mimeType': mimeType,
      if (data != null) 'data': encodeBytes(data),
    };
  }

  @override
  String toString() {
    final contents = [
      if (mimeType != null) 'mimeType=$mimeType',
      if (data != null) 'data=$data',
    ].join(',');
    return 'Blob($contents)';
  }
}
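
// Example sketch: assembling a multimodal `Content` from a text `Part` and an
// inline image `Part`. The PNG bytes are supplied by the caller; this only
// illustrates the shape of the data, not which formats a given model accepts.
Content exampleMultimodalContent(Uint8List pngBytes) {
  return Content(role: 'user', parts: [
    Part(text: 'Describe this image.'),
    Part(inlineData: Blob(mimeType: 'image/png', data: pngBytes)),
  ]);
}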
/// Request to generate a completion from the model.
class GenerateContentRequest extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.GenerateContentRequest';

  /// Required. The name of the `Model` to use for generating the completion.
  ///
  /// Format: `models/{model}`.
  final String model;

  /// Required. The content of the current conversation with the model.
  ///
  /// For single-turn queries, this is a single instance. For multi-turn
  /// queries like
  /// [chat](https://ai.google.dev/gemini-api/docs/text-generation#chat),
  /// this is a repeated field that contains the conversation history and the
  /// latest request.
  final List<Content>? contents;

  /// Optional. A list of unique `SafetySetting` instances for blocking unsafe
  /// content.
  ///
  /// This will be enforced on the `GenerateContentRequest.contents` and
  /// `GenerateContentResponse.candidates`. There should not be more than one
  /// setting for each `SafetyCategory` type. The API will block any contents
  /// and responses that fail to meet the thresholds set by these settings.
  /// This list overrides the default settings for each `SafetyCategory`
  /// specified in the safety_settings. If there is no `SafetySetting` for a
  /// given `SafetyCategory` provided in the list, the API will use the
  /// default safety setting for that category. Harm categories
  /// HARM_CATEGORY_HATE_SPEECH, HARM_CATEGORY_SEXUALLY_EXPLICIT,
  /// HARM_CATEGORY_DANGEROUS_CONTENT, HARM_CATEGORY_HARASSMENT, and
  /// HARM_CATEGORY_CIVIC_INTEGRITY are supported. Refer to the
  /// [guide](https://ai.google.dev/gemini-api/docs/safety-settings)
  /// for detailed information on available safety settings. Also refer to the
  /// [Safety guidance](https://ai.google.dev/gemini-api/docs/safety-guidance)
  /// to learn how to incorporate safety considerations in your AI
  /// applications.
  final List<SafetySetting>? safetySettings;

  /// Optional. Configuration options for model generation and outputs.
  final GenerationConfig? generationConfig;

  GenerateContentRequest({
    required this.model,
    this.contents,
    this.safetySettings,
    this.generationConfig,
  }) : super(fullyQualifiedName);

  factory GenerateContentRequest.fromJson(Map<String, dynamic> json) {
    return GenerateContentRequest(
      model: json['model'],
      contents: decodeListMessage(json['contents'], Content.fromJson),
      safetySettings:
          decodeListMessage(json['safetySettings'], SafetySetting.fromJson),
      generationConfig:
          decode(json['generationConfig'], GenerationConfig.fromJson),
    );
  }

  @override
  Object toJson() {
    return {
      'model': model,
      if (contents != null) 'contents': encodeList(contents),
      if (safetySettings != null) 'safetySettings': encodeList(safetySettings),
      if (generationConfig != null)
        'generationConfig': generationConfig!.toJson(),
    };
  }

  @override
  String toString() {
    final contents = [
      'model=$model',
    ].join(',');
    return 'GenerateContentRequest($contents)';
  }
}
/// Configuration options for model generation and outputs. Not all parameters
/// are configurable for every model.
class GenerationConfig extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.GenerationConfig';

  /// Optional. Number of generated responses to return.
  ///
  /// Currently, this value can only be set to 1. If unset, this will default
  /// to 1.
  final int? candidateCount;

  /// Optional. The set of character sequences (up to 5) that will stop output
  /// generation. If specified, the API will stop at the first appearance of a
  /// `stop_sequence`. The stop sequence will not be included as part of the
  /// response.
  final List<String>? stopSequences;

  /// Optional. The maximum number of tokens to include in a response
  /// candidate.
  ///
  /// Note: The default value varies by model, see the
  /// `Model.output_token_limit` attribute of the `Model` returned from the
  /// `getModel` function.
  final int? maxOutputTokens;

  /// Optional. Controls the randomness of the output.
  ///
  /// Note: The default value varies by model, see the `Model.temperature`
  /// attribute of the `Model` returned from the `getModel` function.
  ///
  /// Values can range from [0.0, 2.0].
  final double? temperature;

  /// Optional. The maximum cumulative probability of tokens to consider when
  /// sampling.
  ///
  /// The model uses combined Top-k and Top-p (nucleus) sampling.
  ///
  /// Tokens are sorted based on their assigned probabilities so that only the
  /// most likely tokens are considered. Top-k sampling directly limits the
  /// maximum number of tokens to consider, while nucleus sampling limits the
  /// number of tokens based on the cumulative probability.
  ///
  /// Note: The default value varies by `Model` and is specified by the
  /// `Model.top_p` attribute returned from the `getModel` function. An empty
  /// `top_k` attribute indicates that the model doesn't apply top-k sampling
  /// and doesn't allow setting `top_k` on requests.
  final double? topP;

  /// Optional. The maximum number of tokens to consider when sampling.
  ///
  /// Gemini models use Top-p (nucleus) sampling or a combination of Top-k and
  /// nucleus sampling. Top-k sampling considers the set of `top_k` most
  /// probable tokens. Models running with nucleus sampling don't allow a
  /// top_k setting.
  ///
  /// Note: The default value varies by `Model` and is specified by the
  /// `Model.top_p` attribute returned from the `getModel` function. An empty
  /// `top_k` attribute indicates that the model doesn't apply top-k sampling
  /// and doesn't allow setting `top_k` on requests.
  final int? topK;

  /// Optional. Presence penalty applied to the next token's logprobs if the
  /// token has already been seen in the response.
  ///
  /// This penalty is binary on/off and not dependent on the number of times
  /// the token is used (after the first). Use `frequency_penalty` for a
  /// penalty that increases with each use.
  ///
  /// A positive penalty will discourage the use of tokens that have already
  /// been used in the response, increasing the vocabulary.
  ///
  /// A negative penalty will encourage the use of tokens that have already
  /// been used in the response, decreasing the vocabulary.
  final double? presencePenalty;

  /// Optional. Frequency penalty applied to the next token's logprobs,
  /// multiplied by the number of times each token has been seen in the
  /// response so far.
  ///
  /// A positive penalty will discourage the use of tokens that have already
  /// been used, proportional to the number of times the token has been used:
  /// the more a token is used, the more difficult it is for the model to use
  /// that token again, increasing the vocabulary of responses.
  ///
  /// Caution: A _negative_ penalty will encourage the model to reuse tokens
  /// proportional to the number of times the token has been used. Small
  /// negative values will reduce the vocabulary of a response. Larger
  /// negative values will cause the model to start repeating a common token
  /// until it hits the `max_output_tokens` limit.
  final double? frequencyPenalty;
  /// Optional. If true, export the logprobs results in response.
  final bool? responseLogprobs;

  /// Optional. Only valid if
  /// [response_logprobs=True][google.ai.generativelanguage.v1.GenerationConfig.response_logprobs].
  /// This sets the number of top logprobs to return at each decoding step in
  /// the `Candidate.logprobs_result`.
  final int? logprobs;

  /// Optional. Enables enhanced civic answers. It may not be available for
  /// all models.
  final bool? enableEnhancedCivicAnswers;

  GenerationConfig({
    this.candidateCount,
    this.stopSequences,
    this.maxOutputTokens,
    this.temperature,
    this.topP,
    this.topK,
    this.presencePenalty,
    this.frequencyPenalty,
    this.responseLogprobs,
    this.logprobs,
    this.enableEnhancedCivicAnswers,
  }) : super(fullyQualifiedName);

  factory GenerationConfig.fromJson(Map<String, dynamic> json) {
    return GenerationConfig(
      candidateCount: json['candidateCount'],
      stopSequences: decodeList(json['stopSequences']),
      maxOutputTokens: json['maxOutputTokens'],
      temperature: decodeDouble(json['temperature']),
      topP: decodeDouble(json['topP']),
      topK: json['topK'],
      presencePenalty: decodeDouble(json['presencePenalty']),
      frequencyPenalty: decodeDouble(json['frequencyPenalty']),
      responseLogprobs: json['responseLogprobs'],
      logprobs: json['logprobs'],
      enableEnhancedCivicAnswers: json['enableEnhancedCivicAnswers'],
    );
  }

  @override
  Object toJson() {
    return {
      if (candidateCount != null) 'candidateCount': candidateCount,
      if (stopSequences != null) 'stopSequences': stopSequences,
      if (maxOutputTokens != null) 'maxOutputTokens': maxOutputTokens,
      if (temperature != null) 'temperature': encodeDouble(temperature),
      if (topP != null) 'topP': encodeDouble(topP),
      if (topK != null) 'topK': topK,
      if (presencePenalty != null)
        'presencePenalty': encodeDouble(presencePenalty),
      if (frequencyPenalty != null)
        'frequencyPenalty': encodeDouble(frequencyPenalty),
      if (responseLogprobs != null) 'responseLogprobs': responseLogprobs,
      if (logprobs != null) 'logprobs': logprobs,
      if (enableEnhancedCivicAnswers != null)
        'enableEnhancedCivicAnswers': enableEnhancedCivicAnswers,
    };
  }

  @override
  String toString() {
    final contents = [
      if (candidateCount != null) 'candidateCount=$candidateCount',
      if (maxOutputTokens != null) 'maxOutputTokens=$maxOutputTokens',
      if (temperature != null) 'temperature=$temperature',
      if (topP != null) 'topP=$topP',
      if (topK != null) 'topK=$topK',
      if (presencePenalty != null) 'presencePenalty=$presencePenalty',
      if (frequencyPenalty != null) 'frequencyPenalty=$frequencyPenalty',
      if (responseLogprobs != null) 'responseLogprobs=$responseLogprobs',
      if (logprobs != null) 'logprobs=$logprobs',
      if (enableEnhancedCivicAnswers != null)
        'enableEnhancedCivicAnswers=$enableEnhancedCivicAnswers',
    ].join(',');
    return 'GenerationConfig($contents)';
  }
}
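
// Example sketch: a request with explicit sampling settings. The values and
// the model name are illustrative; supported ranges and defaults vary by
// model (see the `Model` returned from `getModel`).
GenerateContentRequest exampleConfiguredRequest() {
  return GenerateContentRequest(
    model: 'models/gemini-2.0-flash', // hypothetical model name
    contents: [
      Content(role: 'user', parts: [Part(text: 'Write a haiku about Dart.')]),
    ],
    generationConfig: GenerationConfig(
      temperature: 0.7, // moderate randomness; values range over [0.0, 2.0]
      topP: 0.95,
      maxOutputTokens: 256,
      stopSequences: ['END'],
    ),
  );
}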
/// Response from the model supporting multiple candidate responses.
///
/// Safety ratings and content filtering are reported for both the prompt in
/// `GenerateContentResponse.prompt_feedback` and for each candidate in
/// `finish_reason` and in `safety_ratings`. The API:
/// - Returns either all requested candidates or none of them
/// - Returns no candidates at all only if there was something wrong with the
///   prompt (check `prompt_feedback`)
/// - Reports feedback on each candidate in `finish_reason` and
///   `safety_ratings`.
class GenerateContentResponse extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.GenerateContentResponse';

  /// Candidate responses from the model.
  final List<Candidate>? candidates;

  /// Returns the prompt's feedback related to the content filters.
  final GenerateContentResponse_PromptFeedback? promptFeedback;

  /// Output only. Metadata on the generation requests' token usage.
  final GenerateContentResponse_UsageMetadata? usageMetadata;

  /// Output only. The model version used to generate the response.
  final String? modelVersion;

  GenerateContentResponse({
    this.candidates,
    this.promptFeedback,
    this.usageMetadata,
    this.modelVersion,
  }) : super(fullyQualifiedName);

  factory GenerateContentResponse.fromJson(Map<String, dynamic> json) {
    return GenerateContentResponse(
      candidates: decodeListMessage(json['candidates'], Candidate.fromJson),
      promptFeedback: decode(json['promptFeedback'],
          GenerateContentResponse_PromptFeedback.fromJson),
      usageMetadata: decode(json['usageMetadata'],
          GenerateContentResponse_UsageMetadata.fromJson),
      modelVersion: json['modelVersion'],
    );
  }

  @override
  Object toJson() {
    return {
      if (candidates != null) 'candidates': encodeList(candidates),
      if (promptFeedback != null) 'promptFeedback': promptFeedback!.toJson(),
      if (usageMetadata != null) 'usageMetadata': usageMetadata!.toJson(),
      if (modelVersion != null) 'modelVersion': modelVersion,
    };
  }

  @override
  String toString() {
    final contents = [
      if (modelVersion != null) 'modelVersion=$modelVersion',
    ].join(',');
    return 'GenerateContentResponse($contents)';
  }
}
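
// Example sketch: response handling that follows the contract documented
// above: check `promptFeedback` for a block reason before reading candidates,
// then concatenate the text parts of the first candidate.
String? exampleFirstCandidateText(GenerateContentResponse response) {
  if (response.promptFeedback?.blockReason != null) {
    // The prompt was blocked; no candidates are returned.
    return null;
  }
  final candidates = response.candidates ?? const <Candidate>[];
  if (candidates.isEmpty) return null;
  return candidates.first.content?.parts?.map((p) => p.text ?? '').join();
}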
/// A set of the feedback metadata for the prompt specified in
/// `GenerateContentRequest.content`.
class GenerateContentResponse_PromptFeedback extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.GenerateContentResponse.PromptFeedback';

  /// Optional. If set, the prompt was blocked and no candidates are returned.
  /// Rephrase the prompt.
  final GenerateContentResponse_PromptFeedback_BlockReason? blockReason;

  /// Ratings for safety of the prompt.
  /// There is at most one rating per category.
  final List<SafetyRating>? safetyRatings;

  GenerateContentResponse_PromptFeedback({
    this.blockReason,
    this.safetyRatings,
  }) : super(fullyQualifiedName);

  factory GenerateContentResponse_PromptFeedback.fromJson(
      Map<String, dynamic> json) {
    return GenerateContentResponse_PromptFeedback(
      blockReason: decodeEnum(json['blockReason'],
          GenerateContentResponse_PromptFeedback_BlockReason.fromJson),
      safetyRatings:
          decodeListMessage(json['safetyRatings'], SafetyRating.fromJson),
    );
  }

  @override
  Object toJson() {
    return {
      if (blockReason != null) 'blockReason': blockReason!.toJson(),
      if (safetyRatings != null) 'safetyRatings': encodeList(safetyRatings),
    };
  }

  @override
  String toString() {
    final contents = [
      if (blockReason != null) 'blockReason=$blockReason',
    ].join(',');
    return 'PromptFeedback($contents)';
  }
}

/// Specifies the reason why the prompt was blocked.
class GenerateContentResponse_PromptFeedback_BlockReason extends ProtoEnum {
  /// Default value. This value is unused.
  static const blockReasonUnspecified =
      GenerateContentResponse_PromptFeedback_BlockReason(
          'BLOCK_REASON_UNSPECIFIED');

  /// Prompt was blocked due to safety reasons. Inspect `safety_ratings`
  /// to understand which safety category blocked it.
  static const safety =
      GenerateContentResponse_PromptFeedback_BlockReason('SAFETY');

  /// Prompt was blocked due to unknown reasons.
  static const other =
      GenerateContentResponse_PromptFeedback_BlockReason('OTHER');

  /// Prompt was blocked because it contains terms from the terminology
  /// blocklist.
  static const blocklist =
      GenerateContentResponse_PromptFeedback_BlockReason('BLOCKLIST');

  /// Prompt was blocked due to prohibited content.
  static const prohibitedContent =
      GenerateContentResponse_PromptFeedback_BlockReason('PROHIBITED_CONTENT');

  /// Candidates blocked due to unsafe image generation content.
  static const imageSafety =
      GenerateContentResponse_PromptFeedback_BlockReason('IMAGE_SAFETY');

  const GenerateContentResponse_PromptFeedback_BlockReason(super.value);

  factory GenerateContentResponse_PromptFeedback_BlockReason.fromJson(
          String json) =>
      GenerateContentResponse_PromptFeedback_BlockReason(json);

  @override
  String toString() => 'BlockReason.$value';
}
/// Metadata on the generation request's token usage.
class GenerateContentResponse_UsageMetadata extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.GenerateContentResponse.UsageMetadata';

  /// Number of tokens in the prompt. When `cached_content` is set, this is
  /// still the total effective prompt size, meaning this includes the number
  /// of tokens in the cached content.
  final int? promptTokenCount;

  /// Total number of tokens across all the generated response candidates.
  final int? candidatesTokenCount;

  /// Total token count for the generation request (prompt + response
  /// candidates).
  final int? totalTokenCount;

  GenerateContentResponse_UsageMetadata({
    this.promptTokenCount,
    this.candidatesTokenCount,
    this.totalTokenCount,
  }) : super(fullyQualifiedName);

  factory GenerateContentResponse_UsageMetadata.fromJson(
      Map<String, dynamic> json) {
    return GenerateContentResponse_UsageMetadata(
      promptTokenCount: json['promptTokenCount'],
      candidatesTokenCount: json['candidatesTokenCount'],
      totalTokenCount: json['totalTokenCount'],
    );
  }

  @override
  Object toJson() {
    return {
      if (promptTokenCount != null) 'promptTokenCount': promptTokenCount,
      if (candidatesTokenCount != null)
        'candidatesTokenCount': candidatesTokenCount,
      if (totalTokenCount != null) 'totalTokenCount': totalTokenCount,
    };
  }

  @override
  String toString() {
    final contents = [
      if (promptTokenCount != null) 'promptTokenCount=$promptTokenCount',
      if (candidatesTokenCount != null)
        'candidatesTokenCount=$candidatesTokenCount',
      if (totalTokenCount != null) 'totalTokenCount=$totalTokenCount',
    ].join(',');
    return 'UsageMetadata($contents)';
  }
}
/// A response candidate generated from the model.
class Candidate extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.Candidate';

  /// Output only. Index of the candidate in the list of response candidates.
  final int? index;

  /// Output only. Generated content returned from the model.
  final Content? content;

  /// Optional. Output only. The reason why the model stopped generating
  /// tokens.
  ///
  /// If empty, the model has not stopped generating tokens.
  final Candidate_FinishReason? finishReason;

  /// List of ratings for the safety of a response candidate.
  ///
  /// There is at most one rating per category.
  final List<SafetyRating>? safetyRatings;

  /// Output only. Citation information for the model-generated candidate.
  ///
  /// This field may be populated with recitation information for any text
  /// included in the `content`. These are passages that are "recited" from
  /// copyrighted material in the foundational LLM's training data.
  final CitationMetadata? citationMetadata;

  /// Output only. Token count for this candidate.
  final int? tokenCount;

  /// Output only. Grounding metadata for the candidate.
  ///
  /// This field is populated for `GenerateContent` calls.
  final GroundingMetadata? groundingMetadata;

  /// Output only. Average log probability score of the candidate.
  final double? avgLogprobs;

  /// Output only. Log-likelihood scores for the response tokens and top
  /// tokens.
  final LogprobsResult? logprobsResult;
  Candidate({
    this.index,
    this.content,
    this.finishReason,
    this.safetyRatings,
    this.citationMetadata,
    this.tokenCount,
    this.groundingMetadata,
    this.avgLogprobs,
    this.logprobsResult,
  }) : super(fullyQualifiedName);

  factory Candidate.fromJson(Map<String, dynamic> json) {
    return Candidate(
      index: json['index'],
      content: decode(json['content'], Content.fromJson),
      finishReason:
          decodeEnum(json['finishReason'], Candidate_FinishReason.fromJson),
      safetyRatings:
          decodeListMessage(json['safetyRatings'], SafetyRating.fromJson),
      citationMetadata:
          decode(json['citationMetadata'], CitationMetadata.fromJson),
      tokenCount: json['tokenCount'],
      groundingMetadata:
          decode(json['groundingMetadata'], GroundingMetadata.fromJson),
      avgLogprobs: decodeDouble(json['avgLogprobs']),
      logprobsResult: decode(json['logprobsResult'], LogprobsResult.fromJson),
    );
  }

  @override
  Object toJson() {
    return {
      if (index != null) 'index': index,
      if (content != null) 'content': content!.toJson(),
      if (finishReason != null) 'finishReason': finishReason!.toJson(),
      if (safetyRatings != null) 'safetyRatings': encodeList(safetyRatings),
      if (citationMetadata != null)
        'citationMetadata': citationMetadata!.toJson(),
      if (tokenCount != null) 'tokenCount': tokenCount,
      if (groundingMetadata != null)
        'groundingMetadata': groundingMetadata!.toJson(),
      if (avgLogprobs != null) 'avgLogprobs': encodeDouble(avgLogprobs),
      if (logprobsResult != null) 'logprobsResult': logprobsResult!.toJson(),
    };
  }

  @override
  String toString() {
    final contents = [
      if (index != null) 'index=$index',
      if (finishReason != null) 'finishReason=$finishReason',
      if (tokenCount != null) 'tokenCount=$tokenCount',
      if (avgLogprobs != null) 'avgLogprobs=$avgLogprobs',
    ].join(',');
    return 'Candidate($contents)';
  }
}

/// Defines the reason why the model stopped generating tokens.
class Candidate_FinishReason extends ProtoEnum {
  /// Default value. This value is unused.
  static const finishReasonUnspecified =
      Candidate_FinishReason('FINISH_REASON_UNSPECIFIED');

  /// Natural stop point of the model or provided stop sequence.
  static const stop = Candidate_FinishReason('STOP');

  /// The maximum number of tokens as specified in the request was reached.
  static const maxTokens = Candidate_FinishReason('MAX_TOKENS');

  /// The response candidate content was flagged for safety reasons.
  static const safety = Candidate_FinishReason('SAFETY');

  /// The response candidate content was flagged for recitation reasons.
  static const recitation = Candidate_FinishReason('RECITATION');

  /// The response candidate content was flagged for using an unsupported
  /// language.
  static const language = Candidate_FinishReason('LANGUAGE');

  /// Unknown reason.
  static const other = Candidate_FinishReason('OTHER');

  /// Token generation stopped because the content contains forbidden terms.
  static const blocklist = Candidate_FinishReason('BLOCKLIST');

  /// Token generation stopped for potentially containing prohibited content.
  static const prohibitedContent = Candidate_FinishReason('PROHIBITED_CONTENT');

  /// Token generation stopped because the content potentially contains
  /// Sensitive Personally Identifiable Information (SPII).
  static const spii = Candidate_FinishReason('SPII');

  /// The function call generated by the model is invalid.
  static const malformedFunctionCall =
      Candidate_FinishReason('MALFORMED_FUNCTION_CALL');

  /// Token generation stopped because generated images contain safety
  /// violations.
  static const imageSafety = Candidate_FinishReason('IMAGE_SAFETY');

  const Candidate_FinishReason(super.value);

  factory Candidate_FinishReason.fromJson(String json) =>
      Candidate_FinishReason(json);

  @override
  String toString() => 'FinishReason.$value';
}
/// Logprobs result.
class LogprobsResult extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.LogprobsResult';

  /// Length = total number of decoding steps.
  final List<LogprobsResult_TopCandidates>? topCandidates;

  /// Length = total number of decoding steps.
  /// The chosen candidates may or may not be in top_candidates.
  final List<LogprobsResult_Candidate>? chosenCandidates;

  LogprobsResult({
    this.topCandidates,
    this.chosenCandidates,
  }) : super(fullyQualifiedName);

  factory LogprobsResult.fromJson(Map<String, dynamic> json) {
    return LogprobsResult(
      topCandidates: decodeListMessage(
          json['topCandidates'], LogprobsResult_TopCandidates.fromJson),
      chosenCandidates: decodeListMessage(
          json['chosenCandidates'], LogprobsResult_Candidate.fromJson),
    );
  }

  @override
  Object toJson() {
    return {
      if (topCandidates != null) 'topCandidates': encodeList(topCandidates),
      if (chosenCandidates != null)
        'chosenCandidates': encodeList(chosenCandidates),
    };
  }

  @override
  String toString() => 'LogprobsResult()';
}

/// Candidate for the logprobs token and score.
class LogprobsResult_Candidate extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.LogprobsResult.Candidate';

  /// The candidate's token string value.
  final String? token;

  /// The candidate's token id value.
  final int? tokenId;

  /// The candidate's log probability.
  final double? logProbability;

  LogprobsResult_Candidate({
    this.token,
    this.tokenId,
    this.logProbability,
  }) : super(fullyQualifiedName);

  factory LogprobsResult_Candidate.fromJson(Map<String, dynamic> json) {
    return LogprobsResult_Candidate(
      token: json['token'],
      tokenId: json['tokenId'],
      logProbability: decodeDouble(json['logProbability']),
    );
  }

  @override
  Object toJson() {
    return {
      if (token != null) 'token': token,
      if (tokenId != null) 'tokenId': tokenId,
      if (logProbability != null)
        'logProbability': encodeDouble(logProbability),
    };
  }

  @override
  String toString() {
    final contents = [
      if (token != null) 'token=$token',
      if (tokenId != null) 'tokenId=$tokenId',
      if (logProbability != null) 'logProbability=$logProbability',
    ].join(',');
    return 'Candidate($contents)';
  }
}

/// Candidates with top log probabilities at each decoding step.
class LogprobsResult_TopCandidates extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.LogprobsResult.TopCandidates';

  /// Sorted by log probability in descending order.
  final List<LogprobsResult_Candidate>? candidates;

  LogprobsResult_TopCandidates({
    this.candidates,
  }) : super(fullyQualifiedName);

  factory LogprobsResult_TopCandidates.fromJson(Map<String, dynamic> json) {
    return LogprobsResult_TopCandidates(
      candidates: decodeListMessage(
          json['candidates'], LogprobsResult_Candidate.fromJson),
    );
  }

  @override
  Object toJson() {
    return {
      if (candidates != null) 'candidates': encodeList(candidates),
    };
  }

  @override
  String toString() => 'TopCandidates()';
}
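
// Example sketch: walking the per-step logprobs of a candidate, assuming the
// originating request set `responseLogprobs: true` (and optionally `logprobs`
// for the number of alternatives per step).
void examplePrintLogprobs(LogprobsResult result) {
  final chosen = result.chosenCandidates ?? const <LogprobsResult_Candidate>[];
  final top = result.topCandidates ?? const <LogprobsResult_TopCandidates>[];
  for (var step = 0; step < chosen.length; step++) {
    final c = chosen[step];
    print('step $step: chose "${c.token}" (logprob ${c.logProbability})');
    // Both lists have one entry per decoding step, per the docs above.
    if (step < top.length) {
      for (final alt
          in top[step].candidates ?? const <LogprobsResult_Candidate>[]) {
        print('  alternative "${alt.token}": ${alt.logProbability}');
      }
    }
  }
}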
/// Metadata related to retrieval in the grounding flow.
class RetrievalMetadata extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.RetrievalMetadata';

  /// Optional. Score indicating how likely information from Google Search
  /// could help answer the prompt. The score is in the range [0, 1], where 0
  /// is the least likely and 1 is the most likely. This score is only
  /// populated when Google Search grounding and dynamic retrieval are
  /// enabled. It will be compared to the threshold to determine whether to
  /// trigger Google Search.
  final double? googleSearchDynamicRetrievalScore;

  RetrievalMetadata({
    this.googleSearchDynamicRetrievalScore,
  }) : super(fullyQualifiedName);

  factory RetrievalMetadata.fromJson(Map<String, dynamic> json) {
    return RetrievalMetadata(
      googleSearchDynamicRetrievalScore:
          decodeDouble(json['googleSearchDynamicRetrievalScore']),
    );
  }

  @override
  Object toJson() {
    return {
      if (googleSearchDynamicRetrievalScore != null)
        'googleSearchDynamicRetrievalScore':
            encodeDouble(googleSearchDynamicRetrievalScore),
    };
  }

  @override
  String toString() {
    final contents = [
      if (googleSearchDynamicRetrievalScore != null)
        'googleSearchDynamicRetrievalScore=$googleSearchDynamicRetrievalScore',
    ].join(',');
    return 'RetrievalMetadata($contents)';
  }
}
/// Metadata returned to the client when grounding is enabled.
class GroundingMetadata extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.GroundingMetadata';

  /// Optional. Google Search entry point for follow-up web searches.
  final SearchEntryPoint? searchEntryPoint;

  /// List of supporting references retrieved from the specified grounding
  /// source.
  final List<GroundingChunk>? groundingChunks;

  /// List of grounding supports.
  final List<GroundingSupport>? groundingSupports;

  /// Metadata related to retrieval in the grounding flow.
  final RetrievalMetadata? retrievalMetadata;

  /// Web search queries for follow-up web searches.
  final List<String>? webSearchQueries;

  GroundingMetadata({
    this.searchEntryPoint,
    this.groundingChunks,
    this.groundingSupports,
    this.retrievalMetadata,
    this.webSearchQueries,
  }) : super(fullyQualifiedName);

  factory GroundingMetadata.fromJson(Map<String, dynamic> json) {
    return GroundingMetadata(
      searchEntryPoint:
          decode(json['searchEntryPoint'], SearchEntryPoint.fromJson),
      groundingChunks:
          decodeListMessage(json['groundingChunks'], GroundingChunk.fromJson),
      groundingSupports: decodeListMessage(
          json['groundingSupports'], GroundingSupport.fromJson),
      retrievalMetadata:
          decode(json['retrievalMetadata'], RetrievalMetadata.fromJson),
      webSearchQueries: decodeList(json['webSearchQueries']),
    );
  }

  @override
  Object toJson() {
    return {
      if (searchEntryPoint != null)
        'searchEntryPoint': searchEntryPoint!.toJson(),
      if (groundingChunks != null)
        'groundingChunks': encodeList(groundingChunks),
      if (groundingSupports != null)
        'groundingSupports': encodeList(groundingSupports),
      if (retrievalMetadata != null)
        'retrievalMetadata': retrievalMetadata!.toJson(),
      if (webSearchQueries != null) 'webSearchQueries': webSearchQueries,
    };
  }

  @override
  String toString() => 'GroundingMetadata()';
}
/// Google Search entry point.
class SearchEntryPoint extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.SearchEntryPoint';

  /// Optional. Web content snippet that can be embedded in a web page or an
  /// app webview.
  final String? renderedContent;

  /// Optional. Base64-encoded JSON representing an array of
  /// <search term, search url> tuples.
  final Uint8List? sdkBlob;

  SearchEntryPoint({
    this.renderedContent,
    this.sdkBlob,
  }) : super(fullyQualifiedName);

  factory SearchEntryPoint.fromJson(Map<String, dynamic> json) {
    return SearchEntryPoint(
      renderedContent: json['renderedContent'],
      sdkBlob: decodeBytes(json['sdkBlob']),
    );
  }

  @override
  Object toJson() {
    return {
      if (renderedContent != null) 'renderedContent': renderedContent,
      if (sdkBlob != null) 'sdkBlob': encodeBytes(sdkBlob),
    };
  }

  @override
  String toString() {
    final contents = [
      if (renderedContent != null) 'renderedContent=$renderedContent',
      if (sdkBlob != null) 'sdkBlob=$sdkBlob',
    ].join(',');
    return 'SearchEntryPoint($contents)';
  }
}
/// Grounding chunk.
class GroundingChunk extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.GroundingChunk';

  /// Grounding chunk from the web.
  final GroundingChunk_Web? web;

  GroundingChunk({
    this.web,
  }) : super(fullyQualifiedName);

  factory GroundingChunk.fromJson(Map<String, dynamic> json) {
    return GroundingChunk(
      web: decode(json['web'], GroundingChunk_Web.fromJson),
    );
  }

  @override
  Object toJson() {
    return {
      if (web != null) 'web': web!.toJson(),
    };
  }

  @override
  String toString() => 'GroundingChunk()';
}

/// Chunk from the web.
class GroundingChunk_Web extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.GroundingChunk.Web';

  /// URI reference of the chunk.
  final String? uri;

  /// Title of the chunk.
  final String? title;

  GroundingChunk_Web({
    this.uri,
    this.title,
  }) : super(fullyQualifiedName);

  factory GroundingChunk_Web.fromJson(Map<String, dynamic> json) {
    return GroundingChunk_Web(
      uri: json['uri'],
      title: json['title'],
    );
  }

  @override
  Object toJson() {
    return {
      if (uri != null) 'uri': uri,
      if (title != null) 'title': title,
    };
  }

  @override
  String toString() {
    final contents = [
      if (uri != null) 'uri=$uri',
      if (title != null) 'title=$title',
    ].join(',');
    return 'Web($contents)';
  }
}
/// Segment of the content.
class Segment extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.Segment';

  /// Output only. The index of a Part object within its parent Content
  /// object.
  final int? partIndex;

  /// Output only. Start index in the given Part, measured in bytes. Offset
  /// from the start of the Part, inclusive, starting at zero.
  final int? startIndex;

  /// Output only. End index in the given Part, measured in bytes. Offset from
  /// the start of the Part, exclusive, starting at zero.
  final int? endIndex;

  /// Output only. The text corresponding to the segment from the response.
  final String? text;

  Segment({
    this.partIndex,
    this.startIndex,
    this.endIndex,
    this.text,
  }) : super(fullyQualifiedName);

  factory Segment.fromJson(Map<String, dynamic> json) {
    return Segment(
      partIndex: json['partIndex'],
      startIndex: json['startIndex'],
      endIndex: json['endIndex'],
      text: json['text'],
    );
  }

  @override
  Object toJson() {
    return {
      if (partIndex != null) 'partIndex': partIndex,
      if (startIndex != null) 'startIndex': startIndex,
      if (endIndex != null) 'endIndex': endIndex,
      if (text != null) 'text': text,
    };
  }

  @override
  String toString() {
    final contents = [
      if (partIndex != null) 'partIndex=$partIndex',
      if (startIndex != null) 'startIndex=$startIndex',
      if (endIndex != null) 'endIndex=$endIndex',
      if (text != null) 'text=$text',
    ].join(',');
    return 'Segment($contents)';
  }
}

/// Grounding support.
class GroundingSupport extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.GroundingSupport';

  /// Segment of the content this support belongs to.
  final Segment? segment;

  /// A list of indices (into 'grounding_chunk') specifying the
  /// citations associated with the claim. For instance [1,3,4] means
  /// that grounding_chunk[1], grounding_chunk[3],
  /// grounding_chunk[4] are the retrieved content attributed to the claim.
  final List<int>? groundingChunkIndices;

  /// Confidence score of the support references. Ranges from 0 to 1. 1 is the
  /// most confident. This list must have the same size as the
  /// grounding_chunk_indices.
  final List<double>? confidenceScores;

  GroundingSupport({
    this.segment,
    this.groundingChunkIndices,
    this.confidenceScores,
  }) : super(fullyQualifiedName);

  factory GroundingSupport.fromJson(Map<String, dynamic> json) {
    return GroundingSupport(
      segment: decode(json['segment'], Segment.fromJson),
      groundingChunkIndices: decodeList(json['groundingChunkIndices']),
      confidenceScores: decodeList(json['confidenceScores']),
    );
  }

  @override
  Object toJson() {
    return {
      if (segment != null) 'segment': segment!.toJson(),
      if (groundingChunkIndices != null)
        'groundingChunkIndices': groundingChunkIndices,
      if (confidenceScores != null) 'confidenceScores': confidenceScores,
    };
  }

  @override
  String toString() => 'GroundingSupport()';
}
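
// Example sketch: resolving each grounding support back to the web chunks it
// cites, using the index contract documented on `groundingChunkIndices`.
void examplePrintGroundingSupports(GroundingMetadata metadata) {
  final chunks = metadata.groundingChunks ?? const <GroundingChunk>[];
  for (final support
      in metadata.groundingSupports ?? const <GroundingSupport>[]) {
    final claim = support.segment?.text ?? '(no segment text)';
    for (final index in support.groundingChunkIndices ?? const <int>[]) {
      final uri = index < chunks.length ? chunks[index].web?.uri : null;
      print('"$claim" is supported by ${uri ?? '(chunk $index)'}');
    }
  }
}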
/// Request containing the `Content` for the model to embed.
class EmbedContentRequest extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.EmbedContentRequest';

  /// Required. The model's resource name. This serves as an ID for the Model
  /// to use.
  ///
  /// This name should match a model name returned by the `ListModels` method.
  ///
  /// Format: `models/{model}`
  final String model;

  /// Required. The content to embed. Only the `parts.text` fields will be
  /// counted.
  final Content? content;

  /// Optional. The task type for which the embeddings will be used. Can only
  /// be set for `models/embedding-001`.
  final TaskType? taskType;

  /// Optional. An optional title for the text. Only applicable when TaskType
  /// is `RETRIEVAL_DOCUMENT`.
  ///
  /// Note: Specifying a `title` for `RETRIEVAL_DOCUMENT` provides better
  /// quality embeddings for retrieval.
  final String? title;

  /// Optional. A reduced dimension for the output embedding. If set,
  /// excessive values in the output embedding are truncated from the end.
  /// Supported by newer models since 2024 only. You cannot set this value if
  /// using the earlier model (`models/embedding-001`).
  final int? outputDimensionality;

  EmbedContentRequest({
    required this.model,
    this.content,
    this.taskType,
    this.title,
    this.outputDimensionality,
  }) : super(fullyQualifiedName);

  factory EmbedContentRequest.fromJson(Map<String, dynamic> json) {
    return EmbedContentRequest(
      model: json['model'],
      content: decode(json['content'], Content.fromJson),
      taskType: decodeEnum(json['taskType'], TaskType.fromJson),
      title: json['title'],
      outputDimensionality: json['outputDimensionality'],
    );
  }

  @override
  Object toJson() {
    return {
      'model': model,
      if (content != null) 'content': content!.toJson(),
      if (taskType != null) 'taskType': taskType!.toJson(),
      if (title != null) 'title': title,
      if (outputDimensionality != null)
        'outputDimensionality': outputDimensionality,
    };
  }

  @override
  String toString() {
    final contents = [
      'model=$model',
      if (taskType != null) 'taskType=$taskType',
      if (title != null) 'title=$title',
      if (outputDimensionality != null)
        'outputDimensionality=$outputDimensionality',
    ].join(',');
    return 'EmbedContentRequest($contents)';
  }
}
/// A list of floats representing an embedding. | |
class ContentEmbedding extends ProtoMessage { | |
static const String fullyQualifiedName = | |
'google.ai.generativelanguage.v1.ContentEmbedding'; | |
/// The embedding values. | |
final List<double>? values; | |
ContentEmbedding({ | |
this.values, | |
}) : super(fullyQualifiedName); | |
factory ContentEmbedding.fromJson(Map<String, dynamic> json) { | |
return ContentEmbedding( | |
values: decodeList(json['values']), | |
); | |
} | |
@override | |
Object toJson() { | |
return { | |
if (values != null) 'values': values, | |
}; | |
} | |
@override | |
String toString() => 'ContentEmbedding()'; | |
} | |
/// The response to an `EmbedContentRequest`. | |
class EmbedContentResponse extends ProtoMessage { | |
static const String fullyQualifiedName = | |
'google.ai.generativelanguage.v1.EmbedContentResponse'; | |
/// Output only. The embedding generated from the input content. | |
final ContentEmbedding? embedding; | |
EmbedContentResponse({ | |
this.embedding, | |
}) : super(fullyQualifiedName); | |
factory EmbedContentResponse.fromJson(Map<String, dynamic> json) { | |
return EmbedContentResponse( | |
embedding: decode(json['embedding'], ContentEmbedding.fromJson), | |
); | |
} | |
@override | |
Object toJson() { | |
return { | |
if (embedding != null) 'embedding': embedding!.toJson(), | |
}; | |
} | |
@override | |
String toString() => 'EmbedContentResponse()'; | |
} | |

/// Batch request to get embeddings from the model for a list of prompts.
class BatchEmbedContentsRequest extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.BatchEmbedContentsRequest';

  /// Required. The model's resource name. This serves as an ID for the Model to
  /// use.
  ///
  /// This name should match a model name returned by the `ListModels` method.
  ///
  /// Format: `models/{model}`
  final String model;

  /// Required. Embed requests for the batch. The model in each of these requests
  /// must match the model specified in `BatchEmbedContentsRequest.model`.
  final List<EmbedContentRequest>? requests;

  BatchEmbedContentsRequest({
    required this.model,
    this.requests,
  }) : super(fullyQualifiedName);

  factory BatchEmbedContentsRequest.fromJson(Map<String, dynamic> json) {
    return BatchEmbedContentsRequest(
      model: json['model'],
      requests:
          decodeListMessage(json['requests'], EmbedContentRequest.fromJson),
    );
  }

  @override
  Object toJson() {
    return {
      'model': model,
      if (requests != null) 'requests': encodeList(requests),
    };
  }

  @override
  String toString() {
    final contents = [
      'model=$model',
    ].join(',');
    return 'BatchEmbedContentsRequest($contents)';
  }
}

/// The response to a `BatchEmbedContentsRequest`.
class BatchEmbedContentsResponse extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.BatchEmbedContentsResponse';

  /// Output only. The embeddings for each request, in the same order as provided
  /// in the batch request.
  final List<ContentEmbedding>? embeddings;

  BatchEmbedContentsResponse({
    this.embeddings,
  }) : super(fullyQualifiedName);

  factory BatchEmbedContentsResponse.fromJson(Map<String, dynamic> json) {
    return BatchEmbedContentsResponse(
      embeddings:
          decodeListMessage(json['embeddings'], ContentEmbedding.fromJson),
    );
  }

  @override
  Object toJson() {
    return {
      if (embeddings != null) 'embeddings': encodeList(embeddings),
    };
  }

  @override
  String toString() => 'BatchEmbedContentsResponse()';
}
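
// Usage sketch (hedged): embedding several texts in one round trip. This
// assumes the service exposes a `batchEmbedContents` method mirroring the
// other RPCs in this client; the model name and `Content`/`Part` shapes are
// assumptions. Note that the model in each inner request must match the outer
// `model`, per the docs above.
Future<List<ContentEmbedding>?> embedAll(
    GenerativeService service, List<String> texts) async {
  const model = 'models/text-embedding-004';
  final response = await service.batchEmbedContents(BatchEmbedContentsRequest(
    model: model,
    requests: [
      for (final text in texts)
        EmbedContentRequest(
          model: model,
          content: Content(parts: [Part(text: text)]),
        ),
    ],
  ));
  // Embeddings come back in the same order as the inner requests.
  return response.embeddings;
}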

/// Counts the number of tokens in the `prompt` sent to a model.
///
/// Models may tokenize text differently, so each model may return a different
/// `token_count`.
class CountTokensRequest extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.CountTokensRequest';

  /// Required. The model's resource name. This serves as an ID for the Model to
  /// use.
  ///
  /// This name should match a model name returned by the `ListModels` method.
  ///
  /// Format: `models/{model}`
  final String model;

  /// Optional. The input given to the model as a prompt. This field is ignored
  /// when `generate_content_request` is set.
  final List<Content>? contents;

  /// Optional. The overall input given to the `Model`. This includes the prompt
  /// as well as other model steering information like [system
  /// instructions](https://ai.google.dev/gemini-api/docs/system-instructions)
  /// and/or function declarations for [function
  /// calling](https://ai.google.dev/gemini-api/docs/function-calling).
  /// `model` + `contents` and `generate_content_request` are mutually
  /// exclusive: send either `model` + `contents` or a
  /// `generate_content_request`, but never both.
  final GenerateContentRequest? generateContentRequest;

  CountTokensRequest({
    required this.model,
    this.contents,
    this.generateContentRequest,
  }) : super(fullyQualifiedName);

  factory CountTokensRequest.fromJson(Map<String, dynamic> json) {
    return CountTokensRequest(
      model: json['model'],
      contents: decodeListMessage(json['contents'], Content.fromJson),
      generateContentRequest: decode(
          json['generateContentRequest'], GenerateContentRequest.fromJson),
    );
  }

  @override
  Object toJson() {
    return {
      'model': model,
      if (contents != null) 'contents': encodeList(contents),
      if (generateContentRequest != null)
        'generateContentRequest': generateContentRequest!.toJson(),
    };
  }

  @override
  String toString() {
    final contents = [
      'model=$model',
    ].join(',');
    return 'CountTokensRequest($contents)';
  }
}

/// A response from `CountTokens`.
///
/// It returns the model's `token_count` for the `prompt`.
class CountTokensResponse extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.CountTokensResponse';

  /// The number of tokens that the `Model` tokenizes the `prompt` into. Always
  /// non-negative.
  final int? totalTokens;

  CountTokensResponse({
    this.totalTokens,
  }) : super(fullyQualifiedName);

  factory CountTokensResponse.fromJson(Map<String, dynamic> json) {
    return CountTokensResponse(
      totalTokens: json['totalTokens'],
    );
  }

  @override
  Object toJson() {
    return {
      if (totalTokens != null) 'totalTokens': totalTokens,
    };
  }

  @override
  String toString() {
    final contents = [
      if (totalTokens != null) 'totalTokens=$totalTokens',
    ].join(',');
    return 'CountTokensResponse($contents)';
  }
}
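
// Usage sketch (hedged): checking a prompt against a model's input limit
// before sending it. Assumes the service exposes a `countTokens` method
// mirroring the other RPCs; the `Content`/`Part` shapes are assumptions about
// types defined earlier in this file.
Future<bool> fitsInContext(
    GenerativeService service, Model model, String prompt) async {
  final response = await service.countTokens(CountTokensRequest(
    model: model.name ?? '',
    contents: [
      Content(parts: [Part(text: prompt)]),
    ],
  ));
  // Treat missing values conservatively: an unknown count or limit fails.
  return (response.totalTokens ?? 0) <= (model.inputTokenLimit ?? 0);
}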

/// Information about a Generative Language Model.
class Model extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.Model';

  /// Required. The resource name of the `Model`. Refer to [Model
  /// variants](https://ai.google.dev/gemini-api/docs/models/gemini#model-variations)
  /// for all allowed values.
  ///
  /// Format: `models/{model}` with a `{model}` naming convention of:
  ///
  /// * "{base_model_id}-{version}"
  ///
  /// Examples:
  ///
  /// * `models/gemini-1.5-flash-001`
  final String? name;

  /// Required. The name of the base model; pass this to the generation request.
  ///
  /// Examples:
  ///
  /// * `gemini-1.5-flash`
  final String? baseModelId;

  /// Required. The version number of the model.
  ///
  /// This represents the major version (`1.0` or `1.5`).
  final String? version;

  /// The human-readable name of the model. E.g. "Gemini 1.5 Flash".
  ///
  /// The name can be up to 128 characters long and can consist of any UTF-8
  /// characters.
  final String? displayName;

  /// A short description of the model.
  final String? description;

  /// Maximum number of input tokens allowed for this model.
  final int? inputTokenLimit;

  /// Maximum number of output tokens available for this model.
  final int? outputTokenLimit;

  /// The model's supported generation methods.
  ///
  /// The corresponding API method names are defined as camel case
  /// strings, such as `generateMessage` and `generateContent`.
  final List<String>? supportedGenerationMethods;

  /// Controls the randomness of the output.
  ///
  /// Values can range over `[0.0,max_temperature]`, inclusive. A higher value
  /// will produce responses that are more varied, while a value closer to `0.0`
  /// will typically result in less surprising responses from the model.
  /// This value specifies the default used by the backend when calling the
  /// model.
  final double? temperature;

  /// The maximum temperature this model can use.
  final double? maxTemperature;

  /// For [Nucleus
  /// sampling](https://ai.google.dev/gemini-api/docs/prompting-strategies#top-p).
  ///
  /// Nucleus sampling considers the smallest set of tokens whose probability
  /// sum is at least `top_p`.
  /// This value specifies the default used by the backend when calling the
  /// model.
  final double? topP;

  /// For Top-k sampling.
  ///
  /// Top-k sampling considers the set of `top_k` most probable tokens.
  /// This value specifies the default used by the backend when calling the
  /// model.
  /// If empty, the model doesn't use top-k sampling, and `top_k` isn't allowed
  /// as a generation parameter.
  final int? topK;

  Model({
    this.name,
    this.baseModelId,
    this.version,
    this.displayName,
    this.description,
    this.inputTokenLimit,
    this.outputTokenLimit,
    this.supportedGenerationMethods,
    this.temperature,
    this.maxTemperature,
    this.topP,
    this.topK,
  }) : super(fullyQualifiedName);

  factory Model.fromJson(Map<String, dynamic> json) {
    return Model(
      name: json['name'],
      baseModelId: json['baseModelId'],
      version: json['version'],
      displayName: json['displayName'],
      description: json['description'],
      inputTokenLimit: json['inputTokenLimit'],
      outputTokenLimit: json['outputTokenLimit'],
      supportedGenerationMethods:
          decodeList(json['supportedGenerationMethods']),
      temperature: decodeDouble(json['temperature']),
      maxTemperature: decodeDouble(json['maxTemperature']),
      topP: decodeDouble(json['topP']),
      topK: json['topK'],
    );
  }

  @override
  Object toJson() {
    return {
      if (name != null) 'name': name,
      if (baseModelId != null) 'baseModelId': baseModelId,
      if (version != null) 'version': version,
      if (displayName != null) 'displayName': displayName,
      if (description != null) 'description': description,
      if (inputTokenLimit != null) 'inputTokenLimit': inputTokenLimit,
      if (outputTokenLimit != null) 'outputTokenLimit': outputTokenLimit,
      if (supportedGenerationMethods != null)
        'supportedGenerationMethods': supportedGenerationMethods,
      if (temperature != null) 'temperature': encodeDouble(temperature),
      if (maxTemperature != null)
        'maxTemperature': encodeDouble(maxTemperature),
      if (topP != null) 'topP': encodeDouble(topP),
      if (topK != null) 'topK': topK,
    };
  }

  @override
  String toString() {
    final contents = [
      if (name != null) 'name=$name',
      if (baseModelId != null) 'baseModelId=$baseModelId',
      if (version != null) 'version=$version',
      if (displayName != null) 'displayName=$displayName',
      if (description != null) 'description=$description',
      if (inputTokenLimit != null) 'inputTokenLimit=$inputTokenLimit',
      if (outputTokenLimit != null) 'outputTokenLimit=$outputTokenLimit',
      if (temperature != null) 'temperature=$temperature',
      if (maxTemperature != null) 'maxTemperature=$maxTemperature',
      if (topP != null) 'topP=$topP',
      if (topK != null) 'topK=$topK',
    ].join(',');
    return 'Model($contents)';
  }
}
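
// Illustrative helper (not part of the generated API): filter models by a
// supported generation method, e.g. 'generateContent'. Uses only the fields
// defined on `Model` above; `supportedGenerationMethods` may be null on a
// partially populated `Model`.
Iterable<Model> modelsSupporting(Iterable<Model> models, String method) =>
    models.where(
        (m) => m.supportedGenerationMethods?.contains(method) ?? false);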

/// Request for getting information about a specific Model.
class GetModelRequest extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.GetModelRequest';

  /// Required. The resource name of the model.
  ///
  /// This name should match a model name returned by the `ListModels` method.
  ///
  /// Format: `models/{model}`
  final String name;

  GetModelRequest({
    required this.name,
  }) : super(fullyQualifiedName);

  factory GetModelRequest.fromJson(Map<String, dynamic> json) {
    return GetModelRequest(
      name: json['name'],
    );
  }

  @override
  Object toJson() {
    return {
      'name': name,
    };
  }

  @override
  String toString() {
    final contents = [
      'name=$name',
    ].join(',');
    return 'GetModelRequest($contents)';
  }
}
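
// Usage sketch (hedged): fetching metadata for a single model, assuming the
// service exposes a `getModel` method mirroring the other RPCs in this
// client. The model name is illustrative.
Future<Model> fetchFlashModel(GenerativeService service) =>
    service.getModel(GetModelRequest(name: 'models/gemini-1.5-flash'));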

/// Request for listing all Models.
class ListModelsRequest extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.ListModelsRequest';

  /// The maximum number of `Models` to return (per page).
  ///
  /// If unspecified, 50 models will be returned per page.
  /// This method returns at most 1000 models per page, even if you pass a
  /// larger `page_size`.
  final int? pageSize;

  /// A page token, received from a previous `ListModels` call.
  ///
  /// Provide the `page_token` returned by one request as an argument to the
  /// next request to retrieve the next page.
  ///
  /// When paginating, all other parameters provided to `ListModels` must match
  /// the call that provided the page token.
  final String? pageToken;

  ListModelsRequest({
    this.pageSize,
    this.pageToken,
  }) : super(fullyQualifiedName);

  factory ListModelsRequest.fromJson(Map<String, dynamic> json) {
    return ListModelsRequest(
      pageSize: json['pageSize'],
      pageToken: json['pageToken'],
    );
  }

  @override
  Object toJson() {
    return {
      if (pageSize != null) 'pageSize': pageSize,
      if (pageToken != null) 'pageToken': pageToken,
    };
  }

  @override
  String toString() {
    final contents = [
      if (pageSize != null) 'pageSize=$pageSize',
      if (pageToken != null) 'pageToken=$pageToken',
    ].join(',');
    return 'ListModelsRequest($contents)';
  }
}

/// Response from `ListModels` containing a paginated list of Models.
class ListModelsResponse extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.ListModelsResponse';

  /// The returned Models.
  final List<Model>? models;

  /// A token, which can be sent as `page_token` to retrieve the next page.
  ///
  /// If this field is omitted, there are no more pages.
  final String? nextPageToken;

  ListModelsResponse({
    this.models,
    this.nextPageToken,
  }) : super(fullyQualifiedName);

  factory ListModelsResponse.fromJson(Map<String, dynamic> json) {
    return ListModelsResponse(
      models: decodeListMessage(json['models'], Model.fromJson),
      nextPageToken: json['nextPageToken'],
    );
  }

  @override
  Object toJson() {
    return {
      if (models != null) 'models': encodeList(models),
      if (nextPageToken != null) 'nextPageToken': nextPageToken,
    };
  }

  @override
  String toString() {
    final contents = [
      if (nextPageToken != null) 'nextPageToken=$nextPageToken',
    ].join(',');
    return 'ListModelsResponse($contents)';
  }
}
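
// Pagination sketch (hedged): collecting every model by following
// `nextPageToken` until it is absent, assuming the service exposes a
// `listModels` method mirroring the other RPCs in this client.
Future<List<Model>> listAllModels(GenerativeService service) async {
  final all = <Model>[];
  String? pageToken;
  do {
    final response = await service.listModels(ListModelsRequest(
      pageSize: 50,
      pageToken: pageToken,
    ));
    all.addAll(response.models ?? const []);
    // An omitted token means there are no more pages.
    pageToken = response.nextPageToken;
  } while (pageToken != null && pageToken.isNotEmpty);
  return all;
}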

/// Safety rating for a piece of content.
///
/// The safety rating contains the category of harm and the
/// harm probability level in that category for a piece of content.
/// Content is classified for safety across a number of
/// harm categories, and the probability of the harm classification is
/// included here.
class SafetyRating extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.SafetyRating';

  /// Required. The category for this rating.
  final HarmCategory? category;

  /// Required. The probability of harm for this content.
  final SafetyRating_HarmProbability? probability;

  /// Was this content blocked because of this rating?
  final bool? blocked;

  SafetyRating({
    this.category,
    this.probability,
    this.blocked,
  }) : super(fullyQualifiedName);

  factory SafetyRating.fromJson(Map<String, dynamic> json) {
    return SafetyRating(
      category: decodeEnum(json['category'], HarmCategory.fromJson),
      probability: decodeEnum(
          json['probability'], SafetyRating_HarmProbability.fromJson),
      blocked: json['blocked'],
    );
  }

  @override
  Object toJson() {
    return {
      if (category != null) 'category': category!.toJson(),
      if (probability != null) 'probability': probability!.toJson(),
      if (blocked != null) 'blocked': blocked,
    };
  }

  @override
  String toString() {
    final contents = [
      if (category != null) 'category=$category',
      if (probability != null) 'probability=$probability',
      if (blocked != null) 'blocked=$blocked',
    ].join(',');
    return 'SafetyRating($contents)';
  }
}

/// The probability that a piece of content is harmful.
///
/// The classification system gives the probability of the content being
/// unsafe. This does not indicate the severity of harm for a piece of content.
class SafetyRating_HarmProbability extends ProtoEnum {
  /// Probability is unspecified.
  static const harmProbabilityUnspecified =
      SafetyRating_HarmProbability('HARM_PROBABILITY_UNSPECIFIED');

  /// Content has a negligible chance of being unsafe.
  static const negligible = SafetyRating_HarmProbability('NEGLIGIBLE');

  /// Content has a low chance of being unsafe.
  static const low = SafetyRating_HarmProbability('LOW');

  /// Content has a medium chance of being unsafe.
  static const medium = SafetyRating_HarmProbability('MEDIUM');

  /// Content has a high chance of being unsafe.
  static const high = SafetyRating_HarmProbability('HIGH');

  const SafetyRating_HarmProbability(super.value);

  factory SafetyRating_HarmProbability.fromJson(String json) =>
      SafetyRating_HarmProbability(json);

  @override
  String toString() => 'HarmProbability.$value';
}
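
// Illustrative helper (not part of the generated API): a conservative check
// on a single safety rating. Instance identity across JSON decoding isn't
// guaranteed for these enum wrappers, so this compares the wire `value`
// string that `ProtoEnum` carries (as used by `toString` above).
bool isRiskyRating(SafetyRating rating) =>
    rating.blocked == true ||
    rating.probability?.value == 'MEDIUM' ||
    rating.probability?.value == 'HIGH';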

/// Safety setting, affecting the safety-blocking behavior.
///
/// Passing a safety setting for a category changes the allowed probability that
/// content is blocked.
class SafetySetting extends ProtoMessage {
  static const String fullyQualifiedName =
      'google.ai.generativelanguage.v1.SafetySetting';

  /// Required. The category for this setting.
  final HarmCategory? category;

  /// Required. Controls the probability threshold at which harm is blocked.
  final SafetySetting_HarmBlockThreshold? threshold;

  SafetySetting({
    this.category,
    this.threshold,
  }) : super(fullyQualifiedName);

  factory SafetySetting.fromJson(Map<String, dynamic> json) {
    return SafetySetting(
      category: decodeEnum(json['category'], HarmCategory.fromJson),
      threshold: decodeEnum(
          json['threshold'], SafetySetting_HarmBlockThreshold.fromJson),
    );
  }

  @override
  Object toJson() {
    return {
      if (category != null) 'category': category!.toJson(),
      if (threshold != null) 'threshold': threshold!.toJson(),
    };
  }

  @override
  String toString() {
    final contents = [
      if (category != null) 'category=$category',
      if (threshold != null) 'threshold=$threshold',
    ].join(',');
    return 'SafetySetting($contents)';
  }
}

/// Block at and beyond a specified harm probability.
class SafetySetting_HarmBlockThreshold extends ProtoEnum {
  /// Threshold is unspecified.
  static const harmBlockThresholdUnspecified =
      SafetySetting_HarmBlockThreshold('HARM_BLOCK_THRESHOLD_UNSPECIFIED');

  /// Only content rated NEGLIGIBLE will be allowed.
  static const blockLowAndAbove =
      SafetySetting_HarmBlockThreshold('BLOCK_LOW_AND_ABOVE');

  /// Content rated NEGLIGIBLE or LOW will be allowed.
  static const blockMediumAndAbove =
      SafetySetting_HarmBlockThreshold('BLOCK_MEDIUM_AND_ABOVE');

  /// Content rated NEGLIGIBLE, LOW, or MEDIUM will be allowed.
  static const blockOnlyHigh =
      SafetySetting_HarmBlockThreshold('BLOCK_ONLY_HIGH');

  /// All content will be allowed.
  static const blockNone = SafetySetting_HarmBlockThreshold('BLOCK_NONE');

  /// Turn off the safety filter.
  static const off = SafetySetting_HarmBlockThreshold('OFF');

  const SafetySetting_HarmBlockThreshold(super.value);

  factory SafetySetting_HarmBlockThreshold.fromJson(String json) =>
      SafetySetting_HarmBlockThreshold(json);

  @override
  String toString() => 'HarmBlockThreshold.$value';
}
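
// Illustrative sketch: a safety configuration that blocks medium-and-above
// harassment and hate speech. A list like this would typically accompany a
// generate-content request (the safety settings field is defined on the
// request types earlier in this file).
final moderateSafetySettings = <SafetySetting>[
  SafetySetting(
    category: HarmCategory.harmCategoryHarassment,
    threshold: SafetySetting_HarmBlockThreshold.blockMediumAndAbove,
  ),
  SafetySetting(
    category: HarmCategory.harmCategoryHateSpeech,
    threshold: SafetySetting_HarmBlockThreshold.blockMediumAndAbove,
  ),
];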

/// Type of task for which the embedding will be used.
class TaskType extends ProtoEnum {
  /// Unset value, which will default to one of the other enum values.
  static const taskTypeUnspecified = TaskType('TASK_TYPE_UNSPECIFIED');

  /// Specifies the given text is a query in a search/retrieval setting.
  static const retrievalQuery = TaskType('RETRIEVAL_QUERY');

  /// Specifies the given text is a document from the corpus being searched.
  static const retrievalDocument = TaskType('RETRIEVAL_DOCUMENT');

  /// Specifies the given text will be used for semantic textual similarity
  /// (STS).
  static const semanticSimilarity = TaskType('SEMANTIC_SIMILARITY');

  /// Specifies that the given text will be classified.
  static const classification = TaskType('CLASSIFICATION');

  /// Specifies that the embeddings will be used for clustering.
  static const clustering = TaskType('CLUSTERING');

  /// Specifies that the given text will be used for question answering.
  static const questionAnswering = TaskType('QUESTION_ANSWERING');

  /// Specifies that the given text will be used for fact verification.
  static const factVerification = TaskType('FACT_VERIFICATION');

  const TaskType(super.value);

  factory TaskType.fromJson(String json) => TaskType(json);

  @override
  String toString() => 'TaskType.$value';
}
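
// Illustrative sketch: tagging an embedding request with a `TaskType` so the
// model can optimize the vector for its downstream use; a query and the
// documents it is matched against use different task types. The model name
// and `Content`/`Part` shapes are assumptions about the rest of this file.
final retrievalQueryRequest = EmbedContentRequest(
  model: 'models/text-embedding-004',
  content: Content(parts: [Part(text: 'How do I count tokens?')]),
  taskType: TaskType.retrievalQuery,
);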

/// The category of a rating.
///
/// These categories cover various kinds of harms that developers
/// may wish to adjust.
class HarmCategory extends ProtoEnum {
  /// Category is unspecified.
  static const harmCategoryUnspecified =
      HarmCategory('HARM_CATEGORY_UNSPECIFIED');

  /// **PaLM** - Negative or harmful comments targeting identity and/or protected
  /// attribute.
  static const harmCategoryDerogatory =
      HarmCategory('HARM_CATEGORY_DEROGATORY');

  /// **PaLM** - Content that is rude, disrespectful, or profane.
  static const harmCategoryToxicity = HarmCategory('HARM_CATEGORY_TOXICITY');

  /// **PaLM** - Describes scenarios depicting violence against an individual or
  /// group, or general descriptions of gore.
  static const harmCategoryViolence = HarmCategory('HARM_CATEGORY_VIOLENCE');

  /// **PaLM** - Contains references to sexual acts or other lewd content.
  static const harmCategorySexual = HarmCategory('HARM_CATEGORY_SEXUAL');

  /// **PaLM** - Promotes unchecked medical advice.
  static const harmCategoryMedical = HarmCategory('HARM_CATEGORY_MEDICAL');

  /// **PaLM** - Dangerous content that promotes, facilitates, or encourages
  /// harmful acts.
  static const harmCategoryDangerous = HarmCategory('HARM_CATEGORY_DANGEROUS');

  /// **Gemini** - Harassment content.
  static const harmCategoryHarassment =
      HarmCategory('HARM_CATEGORY_HARASSMENT');

  /// **Gemini** - Hate speech and content.
  static const harmCategoryHateSpeech =
      HarmCategory('HARM_CATEGORY_HATE_SPEECH');

  /// **Gemini** - Sexually explicit content.
  static const harmCategorySexuallyExplicit =
      HarmCategory('HARM_CATEGORY_SEXUALLY_EXPLICIT');

  /// **Gemini** - Dangerous content.
  static const harmCategoryDangerousContent =
      HarmCategory('HARM_CATEGORY_DANGEROUS_CONTENT');

  /// **Gemini** - Content that may be used to harm civic integrity.
  static const harmCategoryCivicIntegrity =
      HarmCategory('HARM_CATEGORY_CIVIC_INTEGRITY');

  const HarmCategory(super.value);

  factory HarmCategory.fromJson(String json) => HarmCategory(json);

  @override
  String toString() => 'HarmCategory.$value';
}