aethera/backend/internal/client/client_test.go
Evan Reichard c51c0ab070
fix(client): support vLLM "reasoning" field for thinking blocks
vLLM sends thinking content in a "reasoning" delta field, unlike
DeepSeek which uses "reasoning_content". Check both field names so
thinking blocks render for vLLM-hosted models like qwen3.6-27b-thinking.

Also update the client tests to exercise thinking output and skip them by
default so they don't run in Drone CI (they require a live LLM API).
2026-04-30 21:55:05 -04:00
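
A minimal sketch of the dual-field check the commit describes, assuming the client decodes each streaming delta into a struct; the type and field names below are hypothetical, since the decoding code lives outside this test file:

// deltaPayload mirrors a streaming chat-completion delta. Both JSON tags
// are declared because vLLM emits thinking text as "reasoning" while
// DeepSeek emits "reasoning_content". (Hypothetical struct for illustration.)
type deltaPayload struct {
    Content          *string `json:"content"`
    Reasoning        *string `json:"reasoning"`
    ReasoningContent *string `json:"reasoning_content"`
}

// thinkingText returns whichever thinking field the backend populated,
// preferring "reasoning_content" when both are present.
func (d *deltaPayload) thinkingText() *string {
    if d.ReasoningContent != nil {
        return d.ReasoningContent
    }
    return d.Reasoning
}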


package client

import (
    "bytes"
    "context"
    "net/url"
    "testing"
    "time"

    "reichard.io/aethera/internal/store"
)

const model = "vllm-qwen3.6-27b-thinking"

func TestSendMessage(t *testing.T) {
    t.Skip("requires live LLM API - run manually with: go test -run TestSendMessage ./internal/client/")

    // Initialize Client
    baseURL, err := url.Parse("https://llm-api.va.reichard.io/v1")
    if err != nil {
        t.Fatalf("Failed to parse base URL: %v", err)
    }
    client := NewClient(baseURL)

    // Create Context
    ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
    defer cancel()

    // Generate Text Stream
    var contentBuf, thinkingBuf bytes.Buffer
    _, err = client.SendMessage(ctx, []*store.Message{{
        Role:    "user",
        Content: "What is 2+2? Think step by step.",
    }}, model, func(mc *MessageChunk) error {
        if mc.Thinking != nil {
            _, err := thinkingBuf.Write([]byte(*mc.Thinking))
            return err
        }
        if mc.Message != nil {
            _, err := contentBuf.Write([]byte(*mc.Message))
            return err
        }
        return nil
    })
    if err != nil {
        t.Fatalf("Failed to generate text stream: %v", err)
    }

    // Verify Thinking
    thinking := thinkingBuf.String()
    if thinking == "" {
        t.Error("No thinking content was received")
    } else {
        t.Logf("Thinking (%d bytes): %s", len(thinking), thinking)
    }

    // Verify Content
    output := contentBuf.String()
    if output == "" {
        t.Error("No content was written to the buffer")
    } else {
        t.Logf("Content (%d bytes): %s", len(output), output)
    }
}

func TestSummarizeChat(t *testing.T) {
    t.Skip("requires live LLM API - run manually with: go test -run TestSummarizeChat ./internal/client/")

    // Initialize Client
    baseURL, err := url.Parse("https://llm-api.va.reichard.io/v1")
    if err != nil {
        t.Fatalf("Failed to parse base URL: %v", err)
    }
    client := NewClient(baseURL)

    // Create Context
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    // Generate Title
    userMessage := "Write me a go program that reads in a zip file and prints the contents along with their sizes and mimetype."
    output, err := client.CreateTitle(ctx, userMessage, model)
    if err != nil {
        t.Fatalf("Failed to generate title: %v", err)
    }

    // Verify Results
    if output == "" {
        t.Error("No title was returned")
    } else {
        t.Logf("Successfully received %d bytes", len(output))
        t.Logf("Output: %s", output)
    }
}
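
Both tests skip by default so the Drone pipeline stays green without a reachable LLM endpoint; to exercise them against a live API, run them by name as the skip messages suggest, e.g. go test -run TestSendMessage ./internal/client/.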