Files
aethera/backend/internal/client/client_test.go
Evan Reichard 0dd9521419
All checks were successful
continuous-integration/drone/push Build is passing
fix: improve chat UI streaming feedback and fix test image path
- Add loading spinner with 'Thinking...' text during streaming when
  content is not yet available
- Fix :key binding to use message.id instead of message.content
- Remove unnecessary TypeScript cast in file reader handler
- Move test image into testdata/ directory for proper test organization
- Remove t.Skip and simplify TestSummarizeChat test message
2026-05-01 21:04:26 -04:00

138 lines
3.3 KiB
Go

package client
import (
"bytes"
"context"
"encoding/base64"
"net/url"
"os"
"testing"
"time"
"reichard.io/aethera/internal/store"
)
// model is the LLM model identifier used by every test in this file.
const model = "qwen3-8b-vision"
// TestSendMessage exercises the streaming chat endpoint end-to-end against a
// live LLM API, verifying that both thinking and content chunks are received.
// It is skipped by default and intended to be run manually.
func TestSendMessage(t *testing.T) {
	t.Skip("requires live LLM API - run manually with: go test -run TestSendMessage ./internal/client/")

	// Build the client against the live endpoint.
	apiURL, err := url.Parse("https://llm-api.va.reichard.io/v1")
	if err != nil {
		t.Fatalf("Failed to parse base URL: %v", err)
	}
	c := NewClient(apiURL)

	// Bound the streaming call to two minutes.
	ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
	defer cancel()

	// Accumulate thinking and content chunks separately as they stream in.
	var content, thinking bytes.Buffer
	_, err = c.SendMessage(ctx, []*store.Message{{
		Role:    "user",
		Content: "What is 2+2? Think step by step.",
	}}, model, func(chunk *MessageChunk) error {
		switch {
		case chunk.Thinking != nil:
			_, werr := thinking.WriteString(*chunk.Thinking)
			return werr
		case chunk.Message != nil:
			_, werr := content.WriteString(*chunk.Message)
			return werr
		}
		return nil
	})
	if err != nil {
		t.Fatalf("Failed to generate text stream: %v", err)
	}

	// The model should have produced some thinking output...
	if got := thinking.String(); got == "" {
		t.Error("No thinking content was received")
	} else {
		t.Logf("Thinking (%d bytes): %s", len(got), got)
	}

	// ...and some final answer content.
	if got := content.String(); got == "" {
		t.Error("No content was written to the buffer")
	} else {
		t.Logf("Content (%d bytes): %s", len(got), got)
	}
}
// TestSummarizeChat verifies that CreateTitle returns a non-empty title for a
// trivial chat message against the live LLM API.
func TestSummarizeChat(t *testing.T) {
	// Build the client against the live endpoint.
	apiURL, err := url.Parse("https://llm-api.va.reichard.io/v1")
	if err != nil {
		t.Fatalf("Failed to parse base URL: %v", err)
	}
	c := NewClient(apiURL)

	// Bound the request to thirty seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Ask the model to title a minimal one-message conversation.
	title, err := c.CreateTitle(ctx, "Hi!", model)
	if err != nil {
		t.Fatalf("Failed to generate text stream: %v", err)
	}

	// A non-empty title is the success criterion.
	if title == "" {
		t.Error("No content was written to the buffer")
	} else {
		t.Logf("Successfully received %d bytes from the stream", len(title))
		t.Logf("Output: %s", title)
	}
}
// TestSendMessageWithImage streams a vision request carrying the testdata
// fixture image as a base64 data URL and verifies the model returns a
// non-empty description.
func TestSendMessageWithImage(t *testing.T) {
	// Build the client against the live endpoint.
	apiURL, err := url.Parse("https://llm-api.va.reichard.io/v1")
	if err != nil {
		t.Fatalf("Failed to parse base URL: %v", err)
	}
	c := NewClient(apiURL)

	// Bound the streaming call to two minutes.
	ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
	defer cancel()

	// Embed the fixture image as a base64 data URL.
	raw, err := os.ReadFile("./testdata/test_image.jpg")
	if err != nil {
		t.Fatalf("Failed to read test image: %v", err)
	}
	dataURL := "data:image/jpeg;base64," + base64.StdEncoding.EncodeToString(raw)

	// Collect only content chunks; thinking output is ignored here.
	var reply bytes.Buffer
	_, err = c.SendMessage(ctx, []*store.Message{{
		Role:    "user",
		Content: "Describe this image in detail.",
		Images:  []string{dataURL},
	}}, model, func(chunk *MessageChunk) error {
		if chunk.Message != nil {
			reply.WriteString(*chunk.Message)
		}
		return nil
	})
	if err != nil {
		t.Fatalf("Failed to generate text stream: %v", err)
	}

	// A non-empty description is the success criterion.
	if got := reply.String(); got == "" {
		t.Error("No content was written to the buffer")
	} else {
		t.Logf("Model response (%d chars): %s", len(got), got)
	}
}