Created
July 16, 2024 07:56
-
-
Save weixinfree/f1942a5a845dead31f892218b56433f3 to your computer and use it in GitHub Desktop.
Code review by an LLM using Groq
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| #! /usr/bin/env python3 | |
| from groq import Groq | |
| import pyperclip | |
| import re | |
| from fuzzywuzzy import process | |
| import fire | |
| from rich.console import Console | |
| from rich.markdown import Markdown | |
def render_markdown(markdown_text):
    """Render a Markdown string to the terminal via rich."""
    Console().print(Markdown(markdown_text))
def clean_code(code_string):
    """Shrink pasted source code before sending it to the LLM.

    Strips import statements (Python ``import``/``from … import`` and
    Java-style ``import``), Java ``package`` declarations, and blank
    lines, then truncates to 8000 characters to stay within prompt
    budget.

    :param code_string: raw source text (e.g. from the clipboard)
    :return: cleaned text, at most 8000 characters
    """
    # Remove plain import statements (Python and Java share this form).
    code_string = re.sub(r"^import .*$", "", code_string, flags=re.MULTILINE)
    # Remove Python "from x import y" statements as well.
    code_string = re.sub(r"^from .+ import .*$", "", code_string, flags=re.MULTILINE)
    # Remove package declarations.
    code_string = re.sub(r"^package .*$", "", code_string, flags=re.MULTILINE)
    # Drop blank lines entirely. The original used re.sub(r"^\s*$", "", ...),
    # which only empties whitespace-only lines but leaves their newlines
    # behind — the "empty lines" were never actually removed.
    code_string = "\n".join(
        line for line in code_string.splitlines() if line.strip()
    )
    return code_string[:8000]
# System prompt for the review request: asks the model to check logical
# correctness, exception risks, and optimization opportunities, answering
# in Markdown. The final line instructs the model to reply in Chinese —
# it is runtime prompt text and must stay exactly as written.
CODE_REVIEW = """\
You are a highly skilled programming assistant, proficient in analyzing and optimizing complex code structures. Your expertise spans across multi-threading, multi-processing, and other advanced computer science disciplines.
Please conduct a thorough review of the provided code and address the following points:
1. **Logical Correctness**: Assess whether the code functions as intended without logical errors.
2. **Exception Handling**: Identify potential risks such as NullPointerException and other runtime exceptions that could occur.
3. **Optimization Suggestions**: Propose improvements to enhance the code's efficiency, readability, and maintainability.
Respond using Markdown format for clarity and structure. Your detailed analysis is greatly appreciated.
**请一定要使用中文回复,我的朋友非常需要中文内容!!!**\
"""
def ask_llm(code: str, prompt: str, model: str = "gemma2-9b-it"):
    """Send *code* to the Groq chat API under *prompt* and render the reply.

    Prints a metadata header first, then streams the completion and
    renders the collected answer as Markdown in the terminal.
    """
    # Show what is about to be sent: model name, payload size, prompt.
    header = f"""\
# INPUT META
|key|value|
|---|---|
|model| {model}|
|code| {len(code)}|
# PROMPT
{prompt}
# ANSWER
"""
    render_markdown(header)

    stream = Groq().chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": prompt},
            {"role": "user", "content": f"--- context ---\n{code}"},
        ],
        temperature=1,
        max_tokens=1024,
        top_p=1,
        stream=True,
        stop=None,
    )
    # Drain the stream into one string, skipping chunks with no content.
    pieces = (chunk.choices[0].delta.content or "" for chunk in stream)
    render_markdown("".join(pieces))
    print()
def select_model(m: str) -> str:
    """Fuzzy-match *m* to one of the supported Groq model names.

    Supported models:
        mixtral-8x7b-32768
        llama3-8b-8192
        llama3-70b-8192
        gemma2-9b-it
        gemma-7b-it

    Returns the best fuzzywuzzy match when its score exceeds 50,
    otherwise falls back to "gemma2-9b-it".
    """
    candidates = (
        "mixtral-8x7b-32768",
        "llama3-8b-8192",
        "llama3-70b-8192",
        "gemma2-9b-it",
        "gemma-7b-it",
    )
    best, score = process.extractOne(m, candidates)
    # Only trust the fuzzy match when it clears the 50-point threshold.
    return best if score > 50 else "gemma2-9b-it"
def main(model: str):
    """Review the code currently on the clipboard with the chosen model."""
    clipboard_code = clean_code(pyperclip.paste())
    ask_llm(clipboard_code, CODE_REVIEW, select_model(model))
# CLI entry point: fire maps the command-line argument to main(model),
# e.g. `./script.py llama3`.
if __name__ == "__main__":
    fire.Fire(main)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment