Proxy Ollama API to OpenAI: a small Go server that accepts Ollama-style /api/generate and /api/chat requests and forwards them to the OpenAI API.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
)

// OpenAI API endpoints and key
const (
	openAIGenerateEndpoint = "https://api.openai.com/v1/completions"      // replace with the actual completions endpoint if different
	openAIChatEndpoint     = "https://api.openai.com/v1/chat/completions" // replace with the actual chat endpoint if different
	openAIKey              = "YOUR_OPENAI_API_KEY"                        // replace with your API key
)
// Message is a single chat message, shared by the Ollama and OpenAI chat formats.
type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

// Ollama generate request
type OllamaGenerateRequest struct {
	Model       string  `json:"model"`
	Prompt      string  `json:"prompt"`
	Temperature float64 `json:"temperature"`
	MaxTokens   int     `json:"max_tokens"`
}

// Ollama chat request
type OllamaChatRequest struct {
	Model    string    `json:"model"`
	Messages []Message `json:"messages"`
}

// OpenAI generate (completions) request
type OpenAIGenerateRequest struct {
	Model       string  `json:"model"`
	Prompt      string  `json:"prompt"`
	Temperature float64 `json:"temperature"`
	MaxTokens   int     `json:"max_tokens"`
}

// OpenAI chat request
type OpenAIChatRequest struct {
	Model    string    `json:"model"`
	Messages []Message `json:"messages"`
}
// Proxy handler for generate requests
func generateHandler(w http.ResponseWriter, r *http.Request) {
	var ollamaReq OllamaGenerateRequest
	if err := json.NewDecoder(r.Body).Decode(&ollamaReq); err != nil {
		http.Error(w, "Invalid request format", http.StatusBadRequest)
		return
	}
	openAIReq := OpenAIGenerateRequest{
		Model:       ollamaReq.Model,
		Prompt:      ollamaReq.Prompt,
		Temperature: ollamaReq.Temperature,
		MaxTokens:   ollamaReq.MaxTokens,
	}
	openAIRequestBody, err := json.Marshal(openAIReq)
	if err != nil {
		http.Error(w, "Failed to create OpenAI request", http.StatusInternalServerError)
		return
	}
	req, err := http.NewRequest("POST", openAIGenerateEndpoint, bytes.NewBuffer(openAIRequestBody))
	if err != nil {
		http.Error(w, "Failed to create request to OpenAI", http.StatusInternalServerError)
		return
	}
	req.Header.Set("Authorization", "Bearer "+openAIKey)
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		http.Error(w, "Failed to communicate with OpenAI", http.StatusInternalServerError)
		return
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		http.Error(w, "Failed to read OpenAI response", http.StatusInternalServerError)
		return
	}
	// Pass the OpenAI response through verbatim.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(resp.StatusCode)
	w.Write(body)
}
// Proxy handler for chat requests
func chatHandler(w http.ResponseWriter, r *http.Request) {
	var ollamaReq OllamaChatRequest
	if err := json.NewDecoder(r.Body).Decode(&ollamaReq); err != nil {
		http.Error(w, "Invalid request format", http.StatusBadRequest)
		return
	}
	openAIReq := OpenAIChatRequest{
		Model:    ollamaReq.Model,
		Messages: ollamaReq.Messages,
	}
	openAIRequestBody, err := json.Marshal(openAIReq)
	if err != nil {
		http.Error(w, "Failed to create OpenAI request", http.StatusInternalServerError)
		return
	}
	req, err := http.NewRequest("POST", openAIChatEndpoint, bytes.NewBuffer(openAIRequestBody))
	if err != nil {
		http.Error(w, "Failed to create request to OpenAI", http.StatusInternalServerError)
		return
	}
	req.Header.Set("Authorization", "Bearer "+openAIKey)
	req.Header.Set("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		http.Error(w, "Failed to communicate with OpenAI", http.StatusInternalServerError)
		return
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		http.Error(w, "Failed to read OpenAI response", http.StatusInternalServerError)
		return
	}
	// Pass the OpenAI response through verbatim.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(resp.StatusCode)
	w.Write(body)
}
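
// Note: the handlers above return OpenAI-format JSON as-is. An Ollama client
// that expects Ollama's own response shape would need a translation step such
// as this sketch. The field names here are assumptions based on the public
// OpenAI and Ollama API references; this helper is illustrative only and is
// not wired into the handlers.
func toOllamaChatResponse(openAIBody []byte) ([]byte, error) {
	var in struct {
		Model   string `json:"model"`
		Choices []struct {
			Message struct {
				Role    string `json:"role"`
				Content string `json:"content"`
			} `json:"message"`
		} `json:"choices"`
	}
	if err := json.Unmarshal(openAIBody, &in); err != nil {
		return nil, err
	}
	out := map[string]interface{}{
		"model": in.Model,
		"done":  true, // non-streaming responses arrive complete
	}
	if len(in.Choices) > 0 {
		out["message"] = in.Choices[0].Message
	}
	return json.Marshal(out)
}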
func main() {
	http.HandleFunc("/api/generate", generateHandler) // handle generate requests
	http.HandleFunc("/api/chat", chatHandler)         // handle chat requests
	fmt.Println("Starting proxy server on :8080")
	if err := http.ListenAndServe(":8080", nil); err != nil {
		log.Fatal(err)
	}
}
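
For a quick check, a minimal client for the proxy might look like the sketch below. The model name, prompt, and localhost:8080 address are placeholder assumptions for illustration:

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// Placeholder body; model and prompt are assumptions for illustration.
	body := strings.NewReader(`{"model":"gpt-3.5-turbo-instruct","prompt":"Say hello","temperature":0.7,"max_tokens":32}`)
	resp, err := http.Post("http://localhost:8080/api/generate", "application/json", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// The proxy forwards OpenAI's completion JSON back unchanged.
	fmt.Println(string(out))
}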