@blake41
Created March 19, 2025 13:46
import { Injectable, OnModuleInit } from '@nestjs/common';
import { ConfigService } from '@nestjs/config';
import { generateText, generateObject, NoObjectGeneratedError } from 'ai';
import { createPortkey } from '@portkey-ai/vercel-provider';
import { ApiRequestException } from '../common/errors/api.request.exception';
import { z } from 'zod';

@Injectable()
export class AIService implements OnModuleInit {
  private defaultModel: string;
  private defaultProvider: 'openai' | 'anthropic';
  private portkeyClient: any;

  constructor(private readonly configService: ConfigService) {}

  async onModuleInit() {
    const openaiApiKey = this.configService.get<string>('OPENAI_API_KEY');
    const anthropicApiKey = this.configService.get<string>('ANTHROPIC_API_KEY');
    const portkeyApiKey = this.configService.get<string>('PORTKEY_API_KEY');

    if (!portkeyApiKey) {
      throw new Error('PORTKEY_API_KEY is not set in environment variables');
    }

    // Get model and provider from config or use defaults
    this.defaultModel = this.configService.get<string>('AI_MODEL') || 'gpt-4o-mini';
    this.defaultProvider = this.configService.get<'openai' | 'anthropic'>('AI_PROVIDER') || 'openai';

    // Initialize default Portkey client
    this.portkeyClient = createPortkey({
      apiKey: portkeyApiKey,
      config: {
        provider: this.defaultProvider,
        api_key: this.defaultProvider === 'openai' ? openaiApiKey : anthropicApiKey,
        override_params: {
          model: this.defaultModel,
        },
      },
    });

    if (!this.defaultModel) {
      throw new Error('No model configured - neither AI_MODEL environment variable is set nor default model is working');
    }
  }
  private getPortkeyClient(modelName: string, provider: 'openai' | 'anthropic') {
    // If using default model and provider, return existing client
    if (modelName === this.defaultModel && provider === this.defaultProvider) {
      return this.portkeyClient;
    }

    // Create new client with model configuration
    return createPortkey({
      apiKey: this.configService.get<string>('PORTKEY_API_KEY'),
      config: {
        provider,
        api_key: provider === 'openai'
          ? this.configService.get<string>('OPENAI_API_KEY')
          : this.configService.get<string>('ANTHROPIC_API_KEY'),
        override_params: {
          model: modelName,
        },
      },
    });
  }
  /**
   * Generates text using the configured AI model.
   * @param prompt The prompt to send to the model
   * @param options Optional parameters for the completion
   * @returns The generated text, or the structured object as a JSON string when a schema is provided
   */
  async generateText(
    prompt: string,
    options: {
      model?: string;
      provider?: 'openai' | 'anthropic';
      temperature?: number;
      maxTokens?: number;
      schema?: z.ZodTypeAny;
    } = {},
  ) {
    try {
      const modelToUse = options.model || this.defaultModel;
      const providerToUse = options.provider || this.defaultProvider;
      const client = this.getPortkeyClient(modelToUse, providerToUse);

      // Build chat model options, only including defined values
      const chatModelOptions: Record<string, any> = {};
      if (options.temperature !== undefined) chatModelOptions.temperature = options.temperature;
      if (options.maxTokens !== undefined) chatModelOptions.maxTokens = options.maxTokens;

      // If schema is provided, use generateObject for structured output
      if (options.schema) {
        try {
          const { object } = await generateObject({
            model: client.chatModel(modelToUse, chatModelOptions),
            schema: options.schema,
            prompt,
          });
          return JSON.stringify(object, null, 2);
        } catch (error) {
          if (NoObjectGeneratedError.isInstance(error)) {
            console.log('NoObjectGeneratedError');
            console.log('Cause:', error.cause);
            console.log('Text:', error.text);
            console.log('Response:', error.response);
            console.log('Usage:', error.usage);
            throw new ApiRequestException('Failed to generate structured output', 500, {
              cause: error.cause,
              text: error.text,
              usage: error.usage,
            });
          }
          throw error;
        }
      }

      // Otherwise use regular text generation
      const { text } = await generateText({
        model: client.chatModel(modelToUse, chatModelOptions),
        prompt,
      });
      return text;
    } catch (error) {
      console.error('AI API Error:', {
        message: error.message,
        prompt: prompt,
        details: error.response?.data || error.response || error,
      });

      // Check if it's an AI_APICallError
      if (error.name === 'AI_APICallError') {
        console.error('AI API Call Error Details:', {
          url: error.url,
          statusCode: error.statusCode,
          responseHeaders: error.responseHeaders,
          responseBody: error.responseBody,
          isRetryable: error.isRetryable,
          requestBodyValues: error.requestBodyValues,
        });
      }

      throw new ApiRequestException('Error generating AI completion', 500, {
        error: error.message,
        prompt: prompt,
        details: error.response?.data || error.response || error,
        isRetryable: error.isRetryable,
        structuredOutput: !!options.schema,
      });
    }
  }
}
// Example call with structured output (the schema below is illustrative; `prompt` is defined by the caller):
const schema = z.object({ answer: z.string() });
const response = await this.aiService.generateText(prompt, {
  schema,
  model: 'o1-2024-12-17',
});
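
For completeness, here is a minimal sketch of how this service could be registered in a NestJS module. The module name and the './ai.service' import path are assumptions, not part of the original gist; AIService needs ConfigModule in scope because it reads its API keys through ConfigService.

// Hypothetical module wiring (assumed file layout; adjust the import path to your project)
import { Module } from '@nestjs/common';
import { ConfigModule } from '@nestjs/config';
import { AIService } from './ai.service';

@Module({
  imports: [ConfigModule],
  providers: [AIService],
  exports: [AIService],
})
export class AIModule {}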