Skip to content

Instantly share code, notes, and snippets.

@uratmangun
Created December 14, 2023 13:09
Show Gist options
  • Save uratmangun/f075203b455c6771fe5bd6102b774830 to your computer and use it in GitHub Desktop.
commit message
// Generates a conventional commit message from the current `git diff`
// using Google's Gemini Pro model (SDK for chat, raw REST for model listing).
import { GoogleGenerativeAI } from "@google/generative-ai";
import dotenv from 'dotenv';
import {execa} from 'execa';
dotenv.config();
import ky from 'ky';
// Read from the environment (.env loaded above); set GOOGLE_API_KEY there.
const API_KEY = process.env.GOOGLE_API_KEY;
// System prompt priming the model to emit a single Karma-style commit message.
const systemMessage=`You are a commit message generator by creating exactly one commit message by the diff strings without adding unnecessary information! Here is the format of a good commit message from https://karma-runner.github.io/6.4/dev/git-commit-msg.html guides:
---
<type>(<scope>): <subject>
<BLANK LINE>
<body>
---
With allowed <type> values are feat, fix, perf, docs, style, refactor, test, and build.`;
const genAI = new GoogleGenerativeAI(API_KEY);
/**
 * Fetch the list of available models from the Generative Language REST API.
 * @returns {Promise<Object>} Parsed JSON response containing a `models` array
 *   (each entry has `name`, `inputTokenLimit`, etc.).
 */
async function listModel() {
  // Build the URL with searchParams so the API key is properly encoded
  // instead of being string-concatenated into the query string.
  const url = new URL('https://generativelanguage.googleapis.com/v1beta/models');
  url.searchParams.set('key', API_KEY);
  return ky.get(url).json();
}
/**
 * Count the tokens in `text` using the Gemini Pro `countTokens` REST endpoint.
 * The original version only logged the result; it is now also returned so the
 * function's name matches its behavior (callers that ignored the return value
 * are unaffected).
 * @param {string} text - Text to count tokens for.
 * @returns {Promise<Object|undefined>} API response (e.g. `{ totalTokens }`),
 *   or `undefined` if the request failed (the error is logged).
 */
async function countToken(text) {
  try {
    // Encode the key via searchParams rather than string concatenation.
    const url = new URL('https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:countTokens');
    url.searchParams.set('key', API_KEY);
    const response = await ky.post(url, {
      headers: {
        'Content-Type': 'application/json',
      },
      json: {
        contents: [{
          parts: [{
            text
          }]
        }]
      }
    }).json();
    console.log(response);
    return response;
  } catch (error) {
    // Best-effort helper: log and return undefined rather than crash the CLI.
    console.error(error);
  }
}
/**
 * Main entry point: read the working-tree diff via `git diff`, verify it fits
 * within Gemini Pro's input token limit, and print a generated commit message.
 */
async function run() {
  const allModel = await listModel();
  // Look the model up by name instead of the hard-coded `models[3]`, which
  // silently breaks whenever the API adds or reorders models.
  const geminiPro = allModel.models.find((m) => m.name === 'models/gemini-pro');
  if (!geminiPro) {
    throw new Error('models/gemini-pro not found in the model list');
  }
  const geminiProTokenLimit = geminiPro.inputTokenLimit;
  const { stdout } = await execa('git', ['diff']);
  if (stdout.trim() === '') {
    console.error('No unstaged changes found by `git diff`; nothing to summarize.');
    return;
  }
  const model = genAI.getGenerativeModel({ model: "gemini-pro" });
  // Seed the chat with the system prompt and a one-shot example reply.
  const chat = model.startChat({
    history: [
      {
        role: "user",
        parts: systemMessage,
      },
      {
        role: "model",
        parts: `fix(middleware): ensure Range headers adhere more closely to RFC 2616
Add one new dependency, use \`range-parser\` (Express dependency) to compute range. It is more well-tested in the wild.`,
      },
    ]
  });
  const { totalTokens } = await model.countTokens(stdout);
  if (totalTokens <= geminiProTokenLimit) {
    const result = await chat.sendMessage(stdout);
    const response = await result.response;
    console.log(response.text());
  } else {
    // Previously this case fell through silently with no output at all.
    console.error(`Diff is ${totalTokens} tokens, exceeding the model limit of ${geminiProTokenLimit}.`);
  }
}
// Top-level invocation: attach a rejection handler so failures are reported
// instead of leaving a floating promise, and signal failure via exit code.
run().catch((error) => {
  console.error(error);
  process.exitCode = 1;
});
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment