fix(client): support vLLM "reasoning" field for thinking blocks
vLLM streams thinking content in a "reasoning" delta field, unlike DeepSeek, which uses "reasoning_content". Check both field names so that thinking blocks render for vLLM-hosted models such as qwen3.6-27b-thinking.

Also update the client tests to exercise thinking output and skip them by default so they don't run in Drone CI (they require a live LLM API).
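For context, the client-side change amounts to checking both delta keys when parsing the stream. A minimal sketch, assuming an OpenAI-style delta struct (the streamDelta type and thinkingFrom helper below are illustrative, not the actual client code; only the "reasoning" and "reasoning_content" JSON keys come from this commit):

    // streamDelta is a hypothetical shape for one streamed delta from an
    // OpenAI-compatible chat completions endpoint. vLLM emits thinking
    // tokens under "reasoning", DeepSeek under "reasoning_content".
    type streamDelta struct {
        Content          *string `json:"content"`
        Reasoning        *string `json:"reasoning"`         // vLLM
        ReasoningContent *string `json:"reasoning_content"` // DeepSeek
    }

    // thinkingFrom returns whichever thinking field the backend populated,
    // or nil if the delta carries no thinking content.
    func thinkingFrom(d *streamDelta) *string {
        if d.Reasoning != nil {
            return d.Reasoning
        }
        return d.ReasoningContent
    }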
@@ -10,9 +10,11 @@ import (
 	"reichard.io/aethera/internal/store"
 )
 
-const model = "devstral-small-2-instruct"
+const model = "vllm-qwen3.6-27b-thinking"
 
 func TestSendMessage(t *testing.T) {
+	t.Skip("requires live LLM API - run manually with: go test -run TestSendMessage ./internal/client/")
+
 	// Initialize Client
 	baseURL, err := url.Parse("https://llm-api.va.reichard.io/v1")
 	if err != nil {
@@ -21,17 +23,21 @@ func TestSendMessage(t *testing.T) {
 	client := NewClient(baseURL)
 
 	// Create Context
-	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
 	defer cancel()
 
 	// Generate Text Stream
-	var buf bytes.Buffer
+	var contentBuf, thinkingBuf bytes.Buffer
 	_, err = client.SendMessage(ctx, []*store.Message{{
 		Role:    "user",
-		Content: "Hello, how are you?",
+		Content: "What is 2+2? Think step by step.",
 	}}, model, func(mc *MessageChunk) error {
+		if mc.Thinking != nil {
+			_, err := thinkingBuf.Write([]byte(*mc.Thinking))
+			return err
+		}
 		if mc.Message != nil {
-			_, err := buf.Write([]byte(*mc.Message))
+			_, err := contentBuf.Write([]byte(*mc.Message))
 			return err
 		}
 		return nil
@@ -40,17 +46,26 @@ func TestSendMessage(t *testing.T) {
 		t.Fatalf("Failed to generate text stream: %v", err)
 	}
 
-	// Verify Results
-	output := buf.String()
+	// Verify Thinking
+	thinking := thinkingBuf.String()
+	if thinking == "" {
+		t.Error("No thinking content was received")
+	} else {
+		t.Logf("Thinking (%d bytes): %s", len(thinking), thinking)
+	}
+
+	// Verify Content
+	output := contentBuf.String()
 	if output == "" {
 		t.Error("No content was written to the buffer")
 	} else {
-		t.Logf("Successfully received %d bytes from the stream", len(output))
-		t.Logf("Output: %s", output)
+		t.Logf("Content (%d bytes): %s", len(output), output)
 	}
 }
 
 func TestSummarizeChat(t *testing.T) {
+	t.Skip("requires live LLM API - run manually with: go test -run TestSummarizeChat ./internal/client/")
+
 	// Initialize Client
 	baseURL, err := url.Parse("https://llm-api.va.reichard.io/v1")
 	if err != nil {