Files
aethera/backend/internal/client/client_test.go
Evan Reichard 74b8d43032 refactor!: move LLM configuration from in-app settings to CLI/env vars
- Remove `api_endpoint` from Settings model and settings UI
- Add `--llm-endpoint` / `AETHERA_LLM_ENDPOINT` and `--llm-key` /
  `AETHERA_LLM_KEY` CLI flags (endpoint is required)
- Update client constructor to accept API key parameter
- Update tests and documentation to reflect new configuration approach

BREAKING CHANGE: LLM endpoint and key must now be provided via
`AETHERA_LLM_ENDPOINT` and `AETHERA_LLM_KEY` environment variables or
CLI flags instead of the Settings page.
2026-05-01 23:30:34 -04:00

136 lines
3.3 KiB
Go

package client
import (
"bytes"
"context"
"encoding/base64"
"net/url"
"os"
"testing"
"time"
"reichard.io/aethera/internal/store"
)
// model is the LLM model identifier used by every integration test in this package.
const model = "qwen3.5-9b-thinking"
// TestSendMessage is an integration test: it streams a chat completion from
// the live LLM endpoint and verifies that both "thinking" and regular content
// chunks arrive. Requires network access and the AETHERA_LLM_KEY env var;
// skipped under `go test -short`.
func TestSendMessage(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping live-endpoint integration test in -short mode")
	}

	// Initialize Client
	baseURL, err := url.Parse("https://llm-api.va.reichard.io/v1")
	if err != nil {
		t.Fatalf("Failed to parse base URL: %v", err)
	}
	client := NewClient(baseURL, os.Getenv("AETHERA_LLM_KEY"))

	// Bound the request so a hung stream fails the test instead of wedging CI.
	ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
	defer cancel()

	// Generate Text Stream, splitting thinking vs. content chunks into
	// separate buffers so each can be asserted on independently.
	var contentBuf, thinkingBuf bytes.Buffer
	_, err = client.SendMessage(ctx, []*store.Message{{
		Role:    "user",
		Content: "What is 2+2? Think step by step.",
	}}, model, func(mc *MessageChunk) error {
		if mc.Thinking != nil {
			_, err := thinkingBuf.Write([]byte(*mc.Thinking))
			return err
		}
		if mc.Message != nil {
			_, err := contentBuf.Write([]byte(*mc.Message))
			return err
		}
		return nil
	})
	if err != nil {
		t.Fatalf("Failed to generate text stream: %v", err)
	}

	// Verify Thinking — the prompt explicitly asks for step-by-step reasoning,
	// so a thinking-capable model should emit at least one thinking chunk.
	thinking := thinkingBuf.String()
	if thinking == "" {
		t.Error("No thinking content was received")
	} else {
		t.Logf("Thinking (%d bytes): %s", len(thinking), thinking)
	}

	// Verify Content
	output := contentBuf.String()
	if output == "" {
		t.Error("No content was written to the buffer")
	} else {
		t.Logf("Content (%d bytes): %s", len(output), output)
	}
}
// TestSummarizeChat is an integration test: it asks the live LLM endpoint to
// generate a chat title via CreateTitle and verifies a non-empty result.
// Requires network access and the AETHERA_LLM_KEY env var; skipped under
// `go test -short`.
func TestSummarizeChat(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping live-endpoint integration test in -short mode")
	}

	// Initialize Client
	baseURL, err := url.Parse("https://llm-api.va.reichard.io/v1")
	if err != nil {
		t.Fatalf("Failed to parse base URL: %v", err)
	}
	client := NewClient(baseURL, os.Getenv("AETHERA_LLM_KEY"))

	// Title generation is a short request; 30s is ample headroom.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Generate Text Stream
	output, err := client.CreateTitle(ctx, "Hi!", model)
	if err != nil {
		t.Fatalf("Failed to generate text stream: %v", err)
	}

	// Verify Results
	if output == "" {
		t.Error("No content was written to the buffer")
	} else {
		t.Logf("Successfully received %d bytes from the stream", len(output))
		t.Logf("Output: %s", output)
	}
}
// TestSendMessageWithImage is an integration test: it sends a multimodal
// message (text + base64 data-URL image) to the live LLM endpoint and
// verifies a non-empty description is streamed back. Requires network access,
// the AETHERA_LLM_KEY env var, and ./testdata/test_image.jpg; skipped under
// `go test -short`.
func TestSendMessageWithImage(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping live-endpoint integration test in -short mode")
	}

	// Initialize Client
	baseURL, err := url.Parse("https://llm-api.va.reichard.io/v1")
	if err != nil {
		t.Fatalf("Failed to parse base URL: %v", err)
	}
	client := NewClient(baseURL, os.Getenv("AETHERA_LLM_KEY"))

	// Vision requests are slower than text-only; allow a generous timeout.
	ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
	defer cancel()

	// Load Test Image and Convert to Base64 Data URL
	imgData, err := os.ReadFile("./testdata/test_image.jpg")
	if err != nil {
		t.Fatalf("Failed to read test image: %v", err)
	}
	dataURL := "data:image/jpeg;base64," + base64.StdEncoding.EncodeToString(imgData)

	// Generate Text Stream; only content chunks matter for this assertion,
	// so thinking chunks (if any) are ignored.
	var outputBuf bytes.Buffer
	_, err = client.SendMessage(ctx, []*store.Message{{
		Role:    "user",
		Content: "Describe this image in detail.",
		Images:  []string{dataURL},
	}}, model, func(mc *MessageChunk) error {
		if mc.Message != nil {
			outputBuf.WriteString(*mc.Message)
		}
		return nil
	})
	if err != nil {
		t.Fatalf("Failed to generate text stream: %v", err)
	}

	// Verify Response
	output := outputBuf.String()
	if output == "" {
		t.Error("No content was written to the buffer")
	} else {
		t.Logf("Model response (%d chars): %s", len(output), output)
	}
}