
Gemini Image Segmentation with Genkit

This Gist is a working example of Gemini 2.5 Pro image segmentation in Genkit JS.

The actual segmentation code in index.ts is small; most of the code is sample code that uses the sharp library to generate an image with segmented overlays.
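Once defined, the flow can be invoked directly as an async function. A minimal sketch (assuming a Gemini API key is set in the environment; the file name and item description below are placeholders):

import { segmentFlow } from "./index";

async function main() {
  // Both values below are illustrative placeholders.
  const { segmentedImage, masks } = await segmentFlow({
    imageFile: "photo.jpg",
    itemsDesc: "all animals",
  });
  // segmentedImage is a base64 data URI, suitable for an <img src="..."> tag.
  console.log(segmentedImage.slice(0, 80) + "...");
  console.log(masks);
}

main().catch(console.error);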

Example Output

[Example output image: the source photo with colored segmentation mask overlays, bounding boxes, and labels]

index.ts

import { gemini25ProExp0325, googleAI } from "@genkit-ai/googleai";
import { genkit, z } from "genkit";
import { readFileSync } from "node:fs";
import { generateOverlayImage } from "./mask";

const ai = genkit({ plugins: [googleAI()] });

export const segmentFlow = ai.defineFlow(
  {
    name: "segment",
    inputSchema: z.object({
      imageFile: z.string(),
      itemsDesc: z.string(),
    }),
  },
  async ({ itemsDesc, imageFile }) => {
    const prompt = `Give the segmentation masks for ${
      itemsDesc || "all clearly visible objects"
    } in the image. Output a JSON list of segmentation masks where each entry contains the 2D bounding box in the key "box_2d", the segmentation mask in key "mask", and the text label in the key "label". Use descriptive labels.`;
    const buffer = readFileSync(imageFile);

    const { output } = await ai.generate({
      prompt: [
        {
          media: { url: `data:image/jpeg;base64,${buffer.toString("base64")}` },
        },
        {
          text: prompt,
        },
      ],
      model: gemini25ProExp0325,
    });

    return {
      segmentedImage: await generateOverlayImage(imageFile, output),
      masks: output,
    };
  }
);
mask.ts

import sharp, { Sharp, Metadata, OverlayOptions } from "sharp";
// Assuming 'color-string' library is installed
import colorString from "color-string";

// --- Type Definitions ---

interface MaskInfo {
  width: number;
  height: number;
  channels: number; // Should always be 1 for grayscale mask data
  size: number;
}

interface Dimensions {
  imgHeight: number;
  imgWidth: number;
}

// Simplified structure for the input JSON items
interface InputMaskItem {
  box_2d: [number, number, number, number]; // [y0, x0, y1, x1], normalized to 0-1000
  mask: string; // Base64 encoded PNG string "data:image/png;base64,..."
  label?: string;
}
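// Illustrative example of a single item in the model's JSON output
// (values are hypothetical; box_2d coordinates are normalized to 0-1000):
//
//   {
//     "box_2d": [250, 100, 750, 900],          // [y0, x0, y1, x1]
//     "mask": "data:image/png;base64,iVBO...", // PNG sized to the box
//     "label": "cat"
//   }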
// --- 1. SegmentationMask Class ---

/**
 * Represents a segmentation mask with its bounding box, label,
 * and raw mask pixel data (full image size, grayscale 0-255).
 */
class SegmentationMask {
  readonly y0: number;
  readonly x0: number;
  readonly y1: number;
  readonly x1: number;
  readonly maskData: Buffer; // Full-size grayscale mask buffer
  readonly maskInfo: MaskInfo; // { width, height, channels: 1, size }
  readonly label: string;

  /**
   * Creates an instance of SegmentationMask.
   * @param {number} y0 - Top bounding box pixel coordinate (inclusive).
   * @param {number} x0 - Left bounding box pixel coordinate (inclusive).
   * @param {number} y1 - Bottom bounding box pixel coordinate (inclusive).
   * @param {number} x1 - Right bounding box pixel coordinate (inclusive).
   * @param {Buffer} maskData - Buffer containing raw *full-size* grayscale mask data (0-255).
   * @param {MaskInfo} maskInfo - Metadata about the mask buffer ({width, height, channels: 1}).
   * @param {string} label - The label associated with the mask.
   */
  constructor(
    y0: number,
    x0: number,
    y1: number,
    x1: number,
    maskData: Buffer,
    maskInfo: MaskInfo,
    label: string
  ) {
    this.y0 = y0;
    this.x0 = x0;
    this.y1 = y1;
    this.x1 = x1;
    this.maskData = maskData;
    this.maskInfo = maskInfo;
    this.label = label;
    Object.freeze(this); // Make instance immutable
  }
}
// --- 2. parseSegmentationMasks Function ---

/**
 * Parses an array of prediction items containing segmentation data and extracts full-size masks.
 *
 * @async
 * @param {InputMaskItem[]} items - Array of raw prediction objects.
 * @param {Dimensions} dimensions - Image dimensions { imgHeight, imgWidth }.
 * @returns {Promise<SegmentationMask[]>} A promise resolving to an array of SegmentationMask objects.
 */
async function parseSegmentationMasks(
  items: InputMaskItem[],
  { imgHeight, imgWidth }: Dimensions
): Promise<SegmentationMask[]> {
  if (!imgHeight || !imgWidth || imgHeight <= 0 || imgWidth <= 0) {
    throw new Error("imgHeight and imgWidth must be positive numbers.");
  }
  const masks: SegmentationMask[] = [];
  for (const item of items) {
    try {
      // Basic validation of item structure
      if (
        !item ||
        typeof item !== "object" ||
        !Array.isArray(item.box_2d) ||
        item.box_2d.length !== 4 ||
        typeof item.mask !== "string"
      ) {
        console.warn(
          "Skipping item with invalid structure or missing mask:",
          item
        );
        continue;
      }
      // Calculate absolute pixel coordinates from normalized (0-1000) values
      const absY0 = Math.floor((item.box_2d[0] / 1000) * imgHeight);
      const absX0 = Math.floor((item.box_2d[1] / 1000) * imgWidth);
      const absY1 = Math.ceil((item.box_2d[2] / 1000) * imgHeight); // Use ceil for end points so the box stays inclusive
      const absX1 = Math.ceil((item.box_2d[3] / 1000) * imgWidth);
      // Validate bounding box coordinates relative to each other and image bounds
      if (
        absY0 >= absY1 ||
        absX0 >= absX1 ||
        absY0 < 0 ||
        absX0 < 0 ||
        absY1 > imgHeight || // Use > because coordinates are 0-based indices
        absX1 > imgWidth
      ) {
        console.warn(
          `Skipping item with invalid bounding box coordinates relative to image bounds [${imgWidth}x${imgHeight}]:`,
          item.box_2d,
          `Calculated: (${absY0},${absX0},${absY1},${absX1})`
        );
        continue;
      }
      const bboxHeight = absY1 - absY0;
      const bboxWidth = absX1 - absX0;
      // Validate bounding box dimensions (must be at least 1x1)
      if (bboxHeight < 1 || bboxWidth < 1) {
        console.warn(
          "Skipping item with invalid bounding box dimensions (height/width < 1):",
          item.box_2d,
          `(${bboxWidth}x${bboxHeight})`
        );
        continue;
      }
      const label = item.label || "unknown";
      const pngStr = item.mask;
      if (!pngStr.startsWith("data:image/png;base64,")) {
        console.warn(
          "Skipping item with invalid mask format (not base64 PNG):",
          label
        );
        continue;
      }
      // --- Decode and Process Mask ---
      const base64Data = pngStr.replace(/^data:image\/png;base64,/, "");
      const pngBuffer = Buffer.from(base64Data, "base64");
      // Resize the decoded PNG mask to fit the bounding box using cubic interpolation
      const resizedMaskSharp = sharp(pngBuffer)
        .resize(bboxWidth, bboxHeight, {
          kernel: sharp.kernel.cubic, // Use cubic interpolation
        })
        .grayscale(); // Ensure it's grayscale
      let { data: resizedMaskBuffer, info: resizedMaskInfo } =
        await resizedMaskSharp.raw().toBuffer({ resolveWithObject: true });
      if (resizedMaskInfo.channels !== 1) {
        console.warn(
          `Resized mask for ${label} has ${resizedMaskInfo.channels} channels, expected 1. Forcing grayscale.`
        );
        // Although we called .grayscale(), sharp might behave unexpectedly with odd inputs.
        // Re-process just in case. This is defensive. (Declared with `let` above so the
        // buffer and info can simply be reassigned here.)
        ({ data: resizedMaskBuffer, info: resizedMaskInfo } = await sharp(
          resizedMaskBuffer,
          { raw: resizedMaskInfo }
        )
          .grayscale()
          .raw()
          .toBuffer({ resolveWithObject: true }));
      }
      // Create a blank (black) full-size background mask (grayscale).
      // Pass create options directly to the sharp constructor.
      const backgroundMaskSharp = sharp({
        create: {
          width: imgWidth,
          height: imgHeight,
          channels: 3, // Start with 3 channels for RGB background
          background: { r: 0, g: 0, b: 0 }, // Black RGB
        },
      }).grayscale(); // Convert to grayscale
      // Composite the resized mask onto the blank background at the bbox location
      const { data: finalMaskData, info: finalMaskInfo } =
        await backgroundMaskSharp
          .composite([
            {
              input: resizedMaskBuffer,
              raw: { width: bboxWidth, height: bboxHeight, channels: 1 }, // Format of the input buffer
              top: absY0,
              left: absX0,
            },
          ])
          .raw()
          .toBuffer({ resolveWithObject: true }); // Get final buffer and info
      if (finalMaskInfo.channels !== 1) {
        throw new Error(
          `Composited final mask should be grayscale (1 channel) but has ${finalMaskInfo.channels}`
        );
      }
      // --- Create SegmentationMask instance ---
      const mask = new SegmentationMask(
        absY0,
        absX0,
        absY1,
        absX1,
        finalMaskData, // The full-size composited mask buffer
        {
          // Ensure MaskInfo type compliance
          width: finalMaskInfo.width,
          height: finalMaskInfo.height,
          channels: finalMaskInfo.channels,
          size: finalMaskInfo.size,
        },
        label
      );
      masks.push(mask);
    } catch (error) {
      console.error("Error processing item:", item, error);
      // Optionally re-throw or collect errors
    }
  }
  return masks;
}
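// Worked example (illustrative numbers, not from the model): for a
// 1000x800 (width x height) image and box_2d = [250, 100, 750, 900]:
//   absY0 = floor(250 / 1000 * 800)  = 200
//   absX0 = floor(100 / 1000 * 1000) = 100
//   absY1 = ceil(750 / 1000 * 800)   = 600
//   absX1 = ceil(900 / 1000 * 1000)  = 900
// The decoded mask PNG is therefore resized to 800x400 (w x h) and
// composited onto the blank canvas at top=200, left=100.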
// --- 3. overlayMaskOnImage Function ---

/**
 * Overlays a single *full-size grayscale mask* onto an image using a named color and alpha.
 * Replicates the logic of Python's overlay_mask_on_img using manual alpha blending.
 * The returned sharp instance contains the image with the mask applied.
 *
 * @async
 * @param {Buffer | string | Sharp} imgInput - Base image (path, buffer, or sharp object).
 * @param {Buffer} maskData - Full-size grayscale mask buffer (0-255).
 * @param {MaskInfo} maskInfo - Metadata for maskData ({width, height, channels: 1}).
 * @param {string} colorName - Standard color name string (e.g., 'red', 'blue').
 * @param {number} [alpha=0.7] - Alpha transparency level (0.0 to 1.0) for the overlay color.
 * @returns {Promise<Sharp>} A *new* sharp instance with the mask overlaid.
 */
async function overlayMaskOnImage(
  imgInput: Buffer | string | Sharp,
  maskData: Buffer,
  maskInfo: MaskInfo,
  colorName: string,
  alpha: number = 0.7
): Promise<Sharp> {
  if (!maskData || !maskInfo || maskInfo.channels !== 1) {
    throw new Error(
      "Invalid maskData or maskInfo provided. Requires grayscale buffer (1 channel) and info."
    );
  }
  if (alpha < 0.0 || alpha > 1.0) {
    throw new Error("Alpha must be between 0.0 and 1.0");
  }
  // Needs the color-string library: `pnpm add color-string @types/color-string`
  // or `npm install color-string @types/color-string`
  const parsedColor = colorString.get.rgb(colorName); // Returns [r, g, b, a] or null
  if (!parsedColor) {
    throw new Error(
      `Invalid color name '${colorName}'. Check available CSS color names or provide hex/rgb.`
    );
  }
  // Calculate the RGBA overlay color bytes (0-255 range) including user alpha
  const overlayR = parsedColor[0];
  const overlayG = parsedColor[1];
  const overlayB = parsedColor[2];
  // Alpha for the *overlay color itself*: how much it covers the background.
  const overlayA = Math.round(alpha * 255); // User-defined alpha for the overlay color (0-255)
  // Prepare base image: ensure RGBA format
  let baseSharpInstance: Sharp;
  if (imgInput instanceof sharp) {
    // If input is already a Sharp instance, clone it using explicit cast
    baseSharpInstance = (imgInput as Sharp).clone();
  } else {
    // If input is string or Buffer, create a new Sharp instance, casting input
    baseSharpInstance = sharp(imgInput as string | Buffer);
  }
  // Now baseSharpInstance is guaranteed to be a Sharp instance
  const baseSharpWithAlpha = baseSharpInstance.ensureAlpha(); // Ensure 4 channels (RGBA)
  const { data: baseData, info: baseInfo } = await baseSharpWithAlpha
    .raw()
    .toBuffer({ resolveWithObject: true });
  // Dimension check
  if (
    baseInfo.width !== maskInfo.width ||
    baseInfo.height !== maskInfo.height
  ) {
    throw new Error(
      `Base image dimensions (${baseInfo.width}x${baseInfo.height}) do not match mask dimensions (${maskInfo.width}x${maskInfo.height}).`
    );
  }
  // Channel check (ensureAlpha should handle this)
  if (baseInfo.channels !== 4) {
    throw new Error(
      `Base image must have 4 channels (RGBA) after ensureAlpha(), but has ${baseInfo.channels}. This indicates an issue.`
    );
  }
  const width = baseInfo.width;
  const height = baseInfo.height;
  const outputData = Buffer.from(baseData); // Create a *mutable copy* for the blended output
  // --- Manual Alpha Blending (Pixel by Pixel) ---
  for (let y = 0; y < height; y++) {
    for (let x = 0; x < width; x++) {
      const maskIdx = y * width + x; // Index for the single-channel mask buffer
      const baseIdx = maskIdx * 4; // Index for the 4-channel RGBA base/output buffer
      // Get mask value (0-255). The Python reference uses > 127 as the threshold.
      const maskValue = maskData[maskIdx];
      if (maskValue > 127) {
        // --- Apply Overlay Color using Standard Alpha Compositing (A over B) ---
        // Formula: C_out = C_overlay * alpha_overlay + C_background * (1 - alpha_overlay)
        //          A_out = alpha_overlay + alpha_background * (1 - alpha_overlay)
        // Where alpha_overlay is the `alpha` parameter passed to the function (overlayA / 255.0)
        const bgR = baseData[baseIdx + 0];
        const bgG = baseData[baseIdx + 1];
        const bgB = baseData[baseIdx + 2];
        const bgA = baseData[baseIdx + 3]; // Base image's alpha
        const alphaF = overlayA / 255.0; // Overlay alpha factor (0.0 to 1.0)
        const oneMinusAlphaF = 1.0 - alphaF;
        // Blend RGB channels:
        outputData[baseIdx + 0] = Math.round(
          overlayR * alphaF + bgR * oneMinusAlphaF
        );
        outputData[baseIdx + 1] = Math.round(
          overlayG * alphaF + bgG * oneMinusAlphaF
        );
        outputData[baseIdx + 2] = Math.round(
          overlayB * alphaF + bgB * oneMinusAlphaF
        );
        // Blend Alpha channel: combine the overlay alpha with the background alpha.
        const outA = overlayA + bgA * oneMinusAlphaF; // Note: bgA is 0-255 here
        outputData[baseIdx + 3] = Math.min(255, Math.round(outA)); // Clamp to 255
      }
      // Else (maskValue <= 127): pixel is *not* part of the mask, so the
      // original pixel from `baseData` (already copied into `outputData`) is kept.
    }
  }
  // --- Create a *new* sharp instance from the blended raw pixel data ---
  // This is crucial: return sharp(outputData), not the original baseSharp or imgInput.
  return sharp(outputData, {
    raw: { width: width, height: height, channels: 4 }, // Format of outputData
  });
}
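// Worked example (illustrative): blending a red overlay (255, 0, 0) at
// alpha = 0.7 over a mid-gray background pixel (128, 128, 128):
//   R_out = round(255 * 0.7 + 128 * 0.3) = round(216.9) = 217
//   G_out = round(  0 * 0.7 + 128 * 0.3) = round( 38.4) =  38
//   B_out = round(  0 * 0.7 + 128 * 0.3) = round( 38.4) =  38
// Masked pixels shift strongly toward the overlay color while retaining
// some of the underlying image.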
// --- 4. plotSegmentationMasks Function ---

/**
 * Plots segmentation masks, bounding boxes, and labels on an image.
 * Applies color overlays first, then adds SVG for boxes and text.
 *
 * @async
 * @param {Buffer | string | Sharp} imgInput - Base image (path, buffer, or sharp object).
 * @param {SegmentationMask[]} segmentationMasks - Array of SegmentationMask objects.
 * @returns {Promise<Sharp>} A sharp instance with masks, boxes, and labels overlaid.
 */
async function plotSegmentationMasks(
  imgInput: Buffer | string | Sharp,
  segmentationMasks: SegmentationMask[]
): Promise<Sharp> {
  // Define a list of colors (CSS names work well for SVG)
  const colors: string[] = [
    "red",
    "lime",
    "blue",
    "yellow",
    "fuchsia",
    "aqua",
    "orange",
    "green",
    "purple",
    "olive",
    "teal",
    "maroon",
    "lightcoral",
    "lightgreen",
    "lightblue",
    "gold",
    "violet",
    "turquoise",
    "darkred",
    "darkgreen",
    "darkblue",
    "darkorange",
    "deeppink",
    "deepskyblue", // Add more distinct colors if needed
  ];
  // Initialize currentSharp correctly based on input type
  let currentSharp: Sharp;
  if (imgInput instanceof sharp) {
    // If input is already a Sharp instance, clone it using explicit cast
    currentSharp = (imgInput as Sharp).clone();
  } else {
    // If input is string or Buffer, create a new Sharp instance, casting input
    currentSharp = sharp(imgInput as string | Buffer);
  }
  // Now currentSharp is guaranteed to be a Sharp instance
  // --- Pass 1: Overlay Masks Sequentially ---
  // Each overlay modifies the image buffer used by the next overlay step.
  for (let i = 0; i < segmentationMasks.length; i++) {
    const mask = segmentationMasks[i];
    if (!mask.maskData || !mask.maskInfo) {
      console.warn(
        `Skipping mask ${i} (${mask.label}) due to missing data/info.`
      );
      continue;
    }
    const color = colors[i % colors.length]; // Cycle through colors
    try {
      // Apply the overlay. overlayMaskOnImage returns a *new* sharp instance.
      currentSharp = await overlayMaskOnImage(
        currentSharp, // Pass the current sharp object (or its buffer)
        mask.maskData,
        mask.maskInfo,
        color,
        0.5 // Default alpha; could be made configurable
      );
    } catch (error) {
      console.error(`Error overlaying mask ${i} (${mask.label}):`, error);
      // Decide whether to continue or re-throw
      // throw error; // Uncomment to stop processing on first mask error
    }
  }
  // --- Pass 2 & 3: Draw Bounding Boxes and Text via SVG Overlay ---
  const metadata: Metadata = await currentSharp.metadata(); // Get final dimensions AFTER mask overlays
  const { width, height } = metadata;
  if (!width || !height) {
    throw new Error("Could not get image dimensions after applying masks.");
  }
  const svgElements: string[] = [];
  // Add Bounding Boxes
  segmentationMasks.forEach((mask, i) => {
    const color = colors[i % colors.length];
    const boxWidth = mask.x1 - mask.x0;
    const boxHeight = mask.y1 - mask.y0;
    if (boxWidth > 0 && boxHeight > 0) {
      // Ensure non-zero dimensions for rect
      svgElements.push(
        `<rect x="${mask.x0}" y="${mask.y0}" width="${boxWidth}" height="${boxHeight}" ` +
          `stroke="${color}" stroke-width="3" fill="none" />`
      );
    } else {
      console.warn(
        `Skipping drawing zero-dimension box for mask ${i} (${mask.label})`
      );
    }
  });
  // Add Text Labels
  const fontSize = Math.max(16, Math.min(Math.round(height / 25), 28)); // Dynamic font size (min 16, max 28)
  const textOffsetY = Math.max(3, Math.round(fontSize * 0.3));
  const textOffsetX = Math.max(3, Math.round(fontSize * 0.2));
  segmentationMasks.forEach((mask, i) => {
    const color = colors[i % colors.length];
    // Basic check for non-empty label
    if (mask.label && mask.label.trim() !== "") {
      // Simple SVG text escaping
      const escapedLabel = mask.label
        .replace(/&/g, "&amp;")
        .replace(/</g, "&lt;")
        .replace(/>/g, "&gt;");
      // Position text slightly above the top-left corner of the box
      const textX = mask.x0 + textOffsetX;
      // Ensure text doesn't go above the image (y=0)
      const textY = Math.max(fontSize, mask.y0 - textOffsetY);
      // SVG Text element with outline for better visibility
      svgElements.push(
        `<text x="${textX}" y="${textY}" ` +
          `font-family="sans-serif" font-size="${fontSize}" fill="${color}" ` +
          // `paint-order` makes the stroke render behind the fill
          `paint-order="stroke" stroke="black" stroke-width="1px" stroke-linecap="butt" stroke-linejoin="miter">` +
          `${escapedLabel}</text>`
      );
    }
  });
  // Create the full SVG string
  const svgOverlay =
    `<svg width="${width}" height="${height}" xmlns="http://www.w3.org/2000/svg">` +
    svgElements.join("\n  ") + // Newlines for readability when debugging the SVG
    `</svg>`;
  const svgBuffer = Buffer.from(svgOverlay);
  // Composite the SVG onto the current sharp instance
  try {
    const compositeOptions: OverlayOptions = {
      input: svgBuffer,
      top: 0,
      left: 0,
      // density: 72 // Optional: adjust DPI if needed; the default is usually fine
    };
    // Sharp's composite returns the same instance, modified.
    currentSharp = currentSharp.composite([compositeOptions]);
  } catch (error) {
    console.error(`Error compositing SVG overlay:`, error);
    // Re-throw or handle?
    // throw error;
  }
  return currentSharp; // Return the final sharp instance with all overlays
}
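// Illustrative output (continuing the worked example above: a 1000x800 image
// with a box at (x0=100, y0=200)-(x1=900, y1=600) labeled "cat" in red), the
// generated SVG overlay would contain elements along the lines of:
//   <rect x="100" y="200" width="800" height="400" stroke="red" stroke-width="3" fill="none" />
//   <text x="106" y="192" font-family="sans-serif" font-size="28" fill="red" ...>cat</text>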
// --- 5. generateOverlayImage Function ---

// Define allowed formats and their mimetypes
const FORMAT_TO_MIMETYPE: {
  [key in "jpeg" | "png" | "webp" | "tiff" | "gif" | "avif" | "heif"]: string;
} = {
  jpeg: "image/jpeg",
  png: "image/png",
  webp: "image/webp",
  tiff: "image/tiff",
  gif: "image/gif", // Sharp can output GIF
  avif: "image/avif",
  heif: "image/heif",
};
type SupportedFormat = keyof typeof FORMAT_TO_MIMETYPE;
/**
 * Main function to load an image, parse masks, generate overlays, and return a base64 data URI.
 *
 * @async
 * @param {string | Buffer} sourceImage - Path to the source image file or a Buffer containing image data.
 * @param {InputMaskItem[]} jsonMasks - Array of raw mask prediction objects.
 * @returns {Promise<string>} A base64 encoded data URI string (e.g., "data:image/png;base64,...").
 * @throws {Error} If image metadata cannot be read, parsing fails, or overlay generation fails.
 */
export async function generateOverlayImage(
  sourceImage: string | Buffer,
  jsonMasks: InputMaskItem[]
): Promise<string> {
  let metadata: Metadata;
  try {
    metadata = await sharp(sourceImage).metadata();
  } catch (error) {
    console.error("Error reading image metadata:", error);
    throw new Error(
      `Failed to read metadata from source image. Ensure it's a valid image. Error: ${
        error instanceof Error ? error.message : String(error)
      }`
    );
  }
  const { height: imgHeight, width: imgWidth, format } = metadata;
  if (!imgHeight || !imgWidth) {
    throw new Error("Could not determine image dimensions from metadata.");
  }
  if (!format || !(format in FORMAT_TO_MIMETYPE)) {
    throw new Error(
      `Unsupported image format: ${format}. Supported formats: ${Object.keys(
        FORMAT_TO_MIMETYPE
      ).join(", ")}`
    );
  }
  const masks = await parseSegmentationMasks(jsonMasks, {
    imgHeight,
    imgWidth,
  });
  if (masks.length === 0) {
    console.warn(
      "No valid masks were parsed from the input data. Returning original image as data URI."
    );
    // Still return the original image as a data URI
    const originalBuffer = await sharp(sourceImage).toBuffer();
    const mimeType = FORMAT_TO_MIMETYPE[format as SupportedFormat];
    return `data:${mimeType};base64,${originalBuffer.toString("base64")}`;
  }
  const finalSharpInstance = await plotSegmentationMasks(sourceImage, masks);
  // Determine output format and mime type based on input, default to PNG if needed
  const outputFormat = format as SupportedFormat; // Use the original format
  let mimeType = FORMAT_TO_MIMETYPE[outputFormat];
  // Convert the final sharp instance to a buffer in the determined format
  let outputBuffer: Buffer;
  try {
    // Explicitly format the output buffer if necessary, though often not required if only compositing
    outputBuffer = await finalSharpInstance.toFormat(outputFormat).toBuffer();
  } catch (error) {
    console.error(
      `Error converting final image to buffer with format ${outputFormat}:`,
      error
    );
    // Fall back to PNG
    console.warn("Falling back to PNG format for output buffer.");
    try {
      outputBuffer = await finalSharpInstance.png().toBuffer();
      // Update the mime type so the data URI matches the fallback format
      mimeType = FORMAT_TO_MIMETYPE.png;
    } catch (pngError) {
      console.error("Error converting final image to PNG buffer:", pngError);
      throw new Error(
        `Failed to generate final image buffer. Error: ${
          pngError instanceof Error ? pngError.message : String(pngError)
        }`
      );
    }
  }
  return `data:${mimeType};base64,${outputBuffer.toString("base64")}`;
}
// Example Usage (Conceptual - requires actual image and mask data)
/*
async function runExample() {
  try {
    const imagePath = 'path/to/your/image.jpg'; // Replace with your image path
    const maskJsonData = [ // Replace with your actual mask data
      { box_2d: [100, 150, 300, 350], mask: 'data:image/png;base64,...', label: 'object1' },
      { box_2d: [400, 450, 600, 650], mask: 'data:image/png;base64,...', label: 'object2' },
    ];
    const dataUri = await generateOverlayImage(imagePath, maskJsonData);
    console.log('Generated Data URI:', dataUri.substring(0, 100) + '...'); // Print start of data URI

    // You could then, for example, write this to an HTML file to display it:
    // import fs from 'fs';
    // const htmlContent = `<img src="${dataUri}" alt="Image with Segmentation Masks">`;
    // fs.writeFileSync('output.html', htmlContent);
    // console.log('Saved output.html');
  } catch (error) {
    console.error('Error generating overlay image:', error);
  }
}

runExample();
*/
package.json

{
  "name": "segmentation",
  "version": "1.0.0",
  "description": "",
  "main": "index.js",
  "scripts": {
    "test": "echo \"Error: no test specified\" && exit 1"
  },
  "keywords": [],
  "author": "",
  "license": "ISC",
  "packageManager": "[email protected]",
  "dependencies": {
    "@genkit-ai/googleai": "^1.4.0",
    "@types/node": "^22.14.0",
    "color-string": "^2.0.1",
    "genkit": "^1.4.0",
    "sharp": "^0.34.0"
  },
  "devDependencies": {
    "@types/color-string": "^1.5.5",
    "genkit-cli": "^1.4.0",
    "tsx": "^4.19.3",
    "typescript": "^5.8.3"
  }
}
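To try this locally (a sketch, assuming pnpm is installed and a Gemini API key is exported in the environment): run pnpm install, then start the app through the Genkit developer tooling with something like genkit start -- tsx index.ts, and invoke the segment flow with the path to a local image.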