// Package provider implements AI chat-completion providers for OpenAI-compatible APIs.
package provider
import (
"context"
"errors"
"fmt"
"io"
openai "github.com/sashabaranov/go-openai"
)
// OpenAIProvider implements AIProvider for OpenAI-compatible APIs
// (OpenAI, Qwen, Zhipu, DeepSeek, etc.). It is a thin wrapper around the
// go-openai client; construct it with NewOpenAIProvider.
type OpenAIProvider struct {
	client *openai.Client // configured go-openai client; BaseURL may point at a compatible vendor
	name   string         // provider identifier reported by Name
}
// NewOpenAIProvider constructs a provider for any OpenAI-compatible API.
// Passing an empty baseUrl keeps the library's default OpenAI endpoint;
// otherwise requests are directed at the given base URL.
func NewOpenAIProvider(baseUrl, apiKey string) *OpenAIProvider {
	cfg := openai.DefaultConfig(apiKey)
	if baseUrl != "" {
		cfg.BaseURL = baseUrl
	}
	provider := &OpenAIProvider{
		client: openai.NewClientWithConfig(cfg),
		name:   "openai_compat",
	}
	return provider
}
// Name reports the identifier of this provider implementation.
func (p *OpenAIProvider) Name() string { return p.name }
// Chat sends a synchronous chat completion request and returns the first
// choice's content together with the token usage reported by the API.
// The request is built from req's model, messages, max-token limit, and
// temperature; ctx controls cancellation of the underlying HTTP call.
func (p *OpenAIProvider) Chat(ctx context.Context, req *ChatRequest) (*ChatResponse, error) {
	openaiReq := openai.ChatCompletionRequest{
		Model:       req.Model,
		Messages:    convertToOpenAIMessages(req.Messages),
		MaxTokens:   req.MaxTokens,
		Temperature: float32(req.Temperature),
	}
	resp, err := p.client.CreateChatCompletion(ctx, openaiReq)
	if err != nil {
		return nil, fmt.Errorf("openai chat completion failed: %w", err)
	}
	if len(resp.Choices) == 0 {
		// errors.New rather than fmt.Errorf: no format verbs (staticcheck S1039).
		return nil, errors.New("openai chat completion returned no choices")
	}
	choice := resp.Choices[0]
	return &ChatResponse{
		Content:      choice.Message.Content,
		Model:        resp.Model,
		InputTokens:  resp.Usage.PromptTokens,
		OutputTokens: resp.Usage.CompletionTokens,
		FinishReason: string(choice.FinishReason),
	}, nil
}
// ChatStream sends a streaming chat completion request. It returns a channel
// that delivers StreamChunk values. The channel is closed when the stream
// ends or an error occurs. The final chunk has Done=true and may include
// token usage if the API provides it. The forwarding goroutine exits when
// the stream ends, Recv fails, or ctx is cancelled.
func (p *OpenAIProvider) ChatStream(ctx context.Context, req *ChatRequest) (<-chan *StreamChunk, error) {
	messages := convertToOpenAIMessages(req.Messages)
	openaiReq := openai.ChatCompletionRequest{
		Model:       req.Model,
		Messages:    messages,
		MaxTokens:   req.MaxTokens,
		Temperature: float32(req.Temperature),
		Stream:      true,
		// Ask the API to append a final chunk carrying token usage.
		StreamOptions: &openai.StreamOptions{
			IncludeUsage: true,
		},
	}
	stream, err := p.client.CreateChatCompletionStream(ctx, openaiReq)
	if err != nil {
		return nil, fmt.Errorf("openai stream creation failed: %w", err)
	}
	// Buffered so a briefly slow consumer does not stall Recv on every chunk.
	ch := make(chan *StreamChunk, 64)
	go func() {
		// Sender owns the channel: close it (and the stream) on every exit path.
		defer close(ch)
		defer stream.Close()
		for {
			response, err := stream.Recv()
			if errors.Is(err, io.EOF) {
				// Stream finished normally; emit the terminal Done chunk
				// unless the caller has already gone away.
				select {
				case ch <- &StreamChunk{Done: true}:
				case <-ctx.Done():
				}
				return
			}
			if err != nil {
				// Send error indication as the final chunk.
				// NOTE(review): the error text is delivered inside Content
				// rather than as an error value — consumers must check
				// FinishReason == "error" to distinguish it from model text.
				select {
				case ch <- &StreamChunk{
					Content:      fmt.Sprintf("[stream error: %v]", err),
					Done:         true,
					FinishReason: "error",
				}:
				case <-ctx.Done():
				}
				return
			}
			chunk := &StreamChunk{}
			// Extract content delta from choices (the usage-only final
			// chunk has an empty Choices slice).
			if len(response.Choices) > 0 {
				chunk.Content = response.Choices[0].Delta.Content
				if response.Choices[0].FinishReason != "" {
					chunk.FinishReason = string(response.Choices[0].FinishReason)
				}
			}
			// Extract usage from the final usage chunk (present when
			// StreamOptions.IncludeUsage is true).
			if response.Usage != nil {
				chunk.InputTokens = response.Usage.PromptTokens
				chunk.OutputTokens = response.Usage.CompletionTokens
			}
			// Forward the chunk; abandon the stream if the caller cancels.
			select {
			case ch <- chunk:
			case <-ctx.Done():
				return
			}
		}
	}()
	return ch, nil
}
// convertToOpenAIMessages maps the unified ChatMessage slice onto the
// go-openai request message type, preserving order.
func convertToOpenAIMessages(messages []ChatMessage) []openai.ChatCompletionMessage {
	converted := make([]openai.ChatCompletionMessage, len(messages))
	for i, m := range messages {
		converted[i] = openai.ChatCompletionMessage{
			Role:    m.Role,
			Content: m.Content,
		}
	}
	return converted
}