agent-team/internal/llm/client.go
scorpio de773586c7 feat: implement full agent-team platform
Go backend:
- LLM client with DeepSeek/Kimi/Ollama/OpenAI support (OpenAI-compat)
- Agent loader: AGENT.md frontmatter, SOUL.md, memory read/write
- Skill system following agentskills.io standard
- Room orchestration: master assign→execute→review loop with streaming (see the sketch after this commit message)
- Hub: GitHub repo clone and team package install
- Echo HTTP server with WebSocket and full REST API

React frontend:
- Discord-style 3-panel layout with Tailwind v4
- Zustand store with WebSocket streaming message handling
- Chat view: streaming messages, role styles, right panel, drawer buttons
- Agent MD editor with Monaco Editor (AGENT.md + SOUL.md)
- Market page for GitHub team install/publish

Docs:
- plan.md with full progress tracking and next steps

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-03-04 21:57:46 +08:00
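The assign→execute→review loop mentioned in the commit message can be pictured roughly as follows. This is a minimal sketch only: the room package name, the Agent type, the prompt wording, and the agent-team/internal/llm import path are assumptions for illustration; only the llm.Client API comes from the file below.

// Hypothetical orchestration sketch; Agent, the prompts, and the import
// path are illustrative assumptions, not the actual room package.
package room

import (
	"context"
	"fmt"

	"agent-team/internal/llm"
)

// Agent pairs a display name with its own LLM client.
type Agent struct {
	Name   string
	Client *llm.Client
}

// RunTask walks one assign→execute→review cycle, streaming every
// token to onToken so the caller can forward it over a WebSocket.
func RunTask(ctx context.Context, master, worker Agent, task string, onToken func(agent, tok string)) error {
	// 1. Assign: the master turns the user task into an instruction.
	assignment, err := master.Client.Stream(ctx, []llm.Message{
		llm.NewMsg("system", "You are the master agent. Write an assignment for "+worker.Name+"."),
		llm.NewMsg("user", task),
	}, func(tok string) { onToken(master.Name, tok) })
	if err != nil {
		return fmt.Errorf("assign: %w", err)
	}
	// 2. Execute: the worker streams its result token by token.
	result, err := worker.Client.Stream(ctx, []llm.Message{
		llm.NewMsg("system", "You are "+worker.Name+". Complete the assignment."),
		llm.NewMsg("user", assignment),
	}, func(tok string) { onToken(worker.Name, tok) })
	if err != nil {
		return fmt.Errorf("execute: %w", err)
	}
	// 3. Review: the master judges the result, verdict streamed as well.
	_, err = master.Client.Stream(ctx, []llm.Message{
		llm.NewMsg("system", "Review the following result and give a verdict."),
		llm.NewMsg("user", result),
	}, func(tok string) { onToken(master.Name, tok) })
	if err != nil {
		return fmt.Errorf("review: %w", err)
	}
	return nil
}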

86 lines · 1.8 KiB · Go

package llm

import (
	"context"
	"errors"
	"fmt"
	"io"
	"os"
	"strings"

	openai "github.com/sashabaranov/go-openai"
)
// providers maps a provider name to its OpenAI-compatible base URL.
var providers = map[string]string{
	"deepseek": "https://api.deepseek.com/v1",
	"kimi":     "https://api.moonshot.cn/v1",
	"ollama":   "http://localhost:11434/v1",
	"openai":   "https://api.openai.com/v1",
}

// defaultModels maps a provider name to the model used when none is given.
var defaultModels = map[string]string{
	"deepseek": "deepseek-chat",
	"kimi":     "moonshot-v1-8k",
	"ollama":   "qwen2.5",
	"openai":   "gpt-4o",
}
// Client wraps an OpenAI-compatible chat client bound to one model.
type Client struct {
	c     *openai.Client
	model string
}
// New builds a Client for the given provider. An empty baseURL or model
// falls back to the tables above (unknown providers default to DeepSeek);
// the API key is read from the environment variable named by apiKeyEnv.
func New(provider, model, baseURL, apiKeyEnv string) (*Client, error) {
	if baseURL == "" {
		var ok bool
		baseURL, ok = providers[provider]
		if !ok {
			baseURL = providers["deepseek"]
		}
	}
	if model == "" {
		model = defaultModels[provider]
		if model == "" {
			model = "deepseek-chat"
		}
	}
	apiKey := os.Getenv(apiKeyEnv)
	if apiKey == "" {
		apiKey = "ollama" // ollama doesn't need a real key
	}
	cfg := openai.DefaultConfig(apiKey)
	cfg.BaseURL = baseURL
	return &Client{c: openai.NewClientWithConfig(cfg), model: model}, nil
}
// Message aliases the go-openai chat message type.
type Message = openai.ChatCompletionMessage

// NewMsg builds a Message with the given role ("system", "user", ...) and content.
func NewMsg(role, content string) Message {
	return Message{Role: role, Content: content}
}
// Stream calls the LLM and streams tokens to the callback. It returns the
// full concatenated response once the stream ends.
func (c *Client) Stream(ctx context.Context, msgs []Message, onToken func(string)) (string, error) {
	req := openai.ChatCompletionRequest{
		Model:    c.model,
		Messages: msgs,
		Stream:   true,
	}
	stream, err := c.c.CreateChatCompletionStream(ctx, req)
	if err != nil {
		return "", fmt.Errorf("llm stream: %w", err)
	}
	defer stream.Close()
	var full strings.Builder
	for {
		resp, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			// Surface mid-stream failures instead of silently truncating.
			return full.String(), fmt.Errorf("llm stream recv: %w", err)
		}
		if len(resp.Choices) == 0 {
			continue // some providers send chunks without choices
		}
		delta := resp.Choices[0].Delta.Content
		full.WriteString(delta)
		if onToken != nil {
			onToken(delta)
		}
	}
	return full.String(), nil
}
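For reference, a minimal sketch of driving this client end to end. The provider choice, the DEEPSEEK_API_KEY variable name, and the import path are assumptions; the empty model and baseURL arguments exercise the fallback tables above.

// Hypothetical caller; the import path and env var name are assumptions.
package main

import (
	"context"
	"fmt"
	"log"

	"agent-team/internal/llm"
)

func main() {
	// Empty model/baseURL fall back to the provider defaults.
	client, err := llm.New("deepseek", "", "", "DEEPSEEK_API_KEY")
	if err != nil {
		log.Fatal(err)
	}
	msgs := []llm.Message{
		llm.NewMsg("system", "You are a helpful assistant."),
		llm.NewMsg("user", "Say hello in one sentence."),
	}
	// Print tokens as they arrive; full holds the complete reply.
	full, err := client.Stream(context.Background(), msgs, func(tok string) {
		fmt.Print(tok)
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("\n---\ntotal:", len(full), "bytes")
}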