Troubleshooting
Common issues and solutions when working with AgenticGoKit v1beta.
🎯 Quick Diagnostics
Check Configuration
import "github.com/agenticgokit/agenticgokit/v1beta"
agent, err := v1beta.NewBuilder("agent").
WithLLM("openai", "gpt-4").
Build()
if err != nil {
fmt.Printf("Error: %v\n", err)
fmt.Printf("Error code: %v\n", v1beta.GetErrorCode(err))
fmt.Printf("Suggestion: %v\n", v1beta.GetErrorSuggestion(err))
}
// Check agent capabilities
caps := agent.Capabilities()
fmt.Printf("Capabilities: %v\n", caps)
// Check configuration
config := agent.Config()
fmt.Printf("LLM: %s/%s\n", config.LLM.Provider, config.LLM.Model)
Enable Debug Mode
agent, _ := v1beta.NewBuilder("agent").
WithLLM("openai", "gpt-4").
WithConfig(&v1beta.Config{
DebugMode: true, // Enable verbose logging
}).
Build()
⚙️ Configuration Issues
Issue: Config File Not Found
Error:
CONFIG_NOT_FOUND: configuration file not found
Solutions:
- Use absolute path:
config, err := v1beta.LoadConfig("/absolute/path/to/config.toml")
- Verify working directory:
wd, _ := os.Getwd()
fmt.Printf("Working directory: %s\n", wd)
- Use code-based configuration:
config := &v1beta.Config{
Name: "MyAgent",
LLM: v1beta.LLMConfig{
Provider: "openai",
Model: "gpt-4",
},
}
agent, _ := v1beta.NewBuilder("agent").
WithConfig(config).
Build()
Issue: Invalid Configuration Values
Error:
CONFIG_VALIDATION: temperature must be between 0.0 and 2.0
Valid Ranges:
- Temperature: 0.0 - 2.0
- MaxTokens: 1 - model limit
- BufferSize: 1 - 10000 (recommended: 50-500)
- Timeout: > 0
Solution:
[llm]
provider = "openai"
model = "gpt-4"
temperature = 0.7 # 0.0-2.0
max_tokens = 1000 # positive
[streaming]
buffer_size = 100 # positive
flush_interval_ms = 100 # positive
Issue: Missing API Keys
Error:
LLM_AUTH: API key not found or invalid
Solutions:
- Environment variables:
export OPENAI_API_KEY="sk-..."
export ANTHROPIC_API_KEY="..."
export AZURE_OPENAI_KEY="..."
- In configuration:
[llm]
provider = "openai"
model = "gpt-4"
api_key = "${OPENAI_API_KEY}" # Use env var
- Programmatically:
os.Setenv("OPENAI_API_KEY", "sk-...")
📡 Streaming Issues
Issue: Stream Hangs
Symptoms:
- stream.Wait() never returns
- No chunks received
- Program frozen
Solutions:
- Use context with timeout:
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
stream, err := agent.RunStream(ctx, query)
if err != nil {
return err
}
- Consume all chunks:
for chunk := range stream.Chunks() {
// Process each chunk
if chunk.Type == "error" {
log.Printf("Error: %v", chunk.Error)
break
}
}
result, err := stream.Wait() // Now completes
- Check for errors in stream:
stream, err := agent.RunStream(ctx, query)
if err != nil {
return err
}
for chunk := range stream.Chunks() {
if chunk.Error != "" {
log.Printf("Stream error: %s", chunk.Error)
stream.Cancel()
break
}
}
Issue: Memory Leak with Streams
Symptoms:
- Memory grows over time
- Goroutines accumulate
- Application slows down
Solutions:
- Always cancel unused streams:
stream, err := agent.RunStream(ctx, query)
if err != nil {
return err
}
// Cancel if not consuming
defer stream.Cancel()
- Fully consume or cancel:
stream, err := agent.RunStream(ctx, query)
defer stream.Cancel() // Safety net
for chunk := range stream.Chunks() {
processChunk(chunk)
}
- Use context cancellation:
ctx, cancel := context.WithCancel(context.Background())
defer cancel() // Cleanup all resources
stream, err := agent.RunStream(ctx, query)
Issue: Slow Streaming
Symptoms:
- Chunks arrive slowly
- High latency
- UI sluggish
Solutions:
- Reduce buffer size:
stream, _ := agent.RunStream(ctx, query,
v1beta.WithBufferSize(25), // Smaller = faster first chunk
)
- Reduce flush interval:
stream, _ := agent.RunStream(ctx, query,
v1beta.WithFlushInterval(50 * time.Millisecond),
)
- Use text-only mode:
stream, _ := agent.RunStream(ctx, query,
v1beta.WithTextOnly(true), // Skip metadata
)
🤖 LLM Provider Issues
Issue: OpenAI Rate Limiting
Error:
LLM_RATE_LIMITED: rate limit exceeded
Solutions:
- Implement exponential backoff:
import "github.com/agenticgokit/agenticgokit/v1beta"
func runWithRetry(ctx context.Context, agent v1beta.Agent, input string) (*v1beta.Result, error) {
for attempt := 1; attempt <= 5; attempt++ {
result, err := agent.Run(ctx, input)
if err == nil {
return result, nil
}
if v1beta.IsErrorCode(err, v1beta.ErrCodeLLMRateLimited) {
backoff := time.Duration(attempt*attempt) * time.Second
time.Sleep(backoff)
continue
}
return nil, err
}
return nil, fmt.Errorf("max retries exceeded")
}
- Configure rate limiting:
[llm]
rate_limit = 10 # requests per second
max_concurrent = 3
- Use different tier or model:
// Switch to a model with higher rate limits
agent, _ := v1beta.NewBuilder("agent").
WithLLM("openai", "gpt-3.5-turbo"). // Fewer restrictions
Build()
Issue: Azure OpenAI Connection
Error:
LLM_CONNECTION: failed to connect to Azure OpenAI
Solutions:
- Verify endpoint and deployment:
[llm]
provider = "azure"
model = "your-deployment-name"
endpoint = "https://your-resource.openai.azure.com/"
api_key = "${AZURE_OPENAI_KEY}"
api_version = "2024-02-15-preview"
- Check network access:
// Test connection
resp, err := http.Get("https://your-resource.openai.azure.com/")
if err != nil {
fmt.Printf("Network error: %v\n", err)
}
- Verify credentials:
# Test with curl
curl -H "api-key: $AZURE_OPENAI_KEY" \
https://your-resource.openai.azure.com/openai/deployments/your-deployment/chat/completions?api-version=2024-02-15-preview
Issue: Ollama Model Not Found
Error:
LLM_CALL_FAILED: model not found
Solutions:
- List available models:
ollama list
- Pull the model:
ollama pull llama2
ollama pull mistral
- Verify model name:
agent, _ := v1beta.NewBuilder("agent").
WithLLM("ollama", "llama2"). // Must match exact name
Build()
🔧 Tool Integration Issues
Issue: MCP Server Not Connecting
Error:
MCP_CONNECTION: failed to connect to MCP server
Solutions:
- Verify server is running:
# For stdio servers
which mcp-server-filesystem
# For TCP servers
curl http://localhost:8080/health
- Check server configuration:
mcpServer := v1beta.MCPServer{
Name: "filesystem",
Type: "stdio",
Command: "mcp-server-filesystem", // Must be in PATH
Enabled: true,
}
// Or for TCP
mcpServer := v1beta.MCPServer{
Name: "api",
Type: "tcp",
Address: "localhost",
Port: 8080, // Must be correct
Enabled: true,
}
- Enable debug logging:
[tools.mcp]
enabled = true
connection_timeout = "30s"
max_retries = 3
Issue: Tool Not Found
Error:
TOOL_NOT_FOUND: tool 'calculator' not found
Solutions:
- List available tools:
import "github.com/agenticgokit/agenticgokit/v1beta"
handler := func(ctx context.Context, input string, capabilities *v1beta.Capabilities) (string, error) {
tools := capabilities.Tools.List()
for _, tool := range tools {
fmt.Printf("Tool: %s - %s\n", tool.Name, tool.Description)
}
return capabilities.LLM("You are a helpful assistant.", input)
}
- Check MCP server health:
handler := func(ctx context.Context, input string, capabilities *v1beta.Capabilities) (string, error) {
health := capabilities.Tools.HealthCheck(ctx)
for name, status := range health {
fmt.Printf("Server %s: %s\n", name, status.Status)
if status.Error != "" {
fmt.Printf(" Error: %s\n", status.Error)
}
}
return "", nil
}
- Enable tool discovery:
agent, _ := v1beta.NewBuilder("agent").
WithLLM("openai", "gpt-4").
WithTools(
v1beta.WithMCPDiscovery(8080, 8081, 8090),
).
Build()
Issue: Tool Timeout
Error:
TOOL_TIMEOUT: tool execution exceeded timeout
Solutions:
- Increase timeout:
agent, _ := v1beta.NewBuilder("agent").
WithLLM("openai", "gpt-4").
WithTools(
v1beta.WithMCP(servers...),
v1beta.WithToolTimeout(60 * time.Second), // Increase
).
Build()
- Configure per-tool:
[tools]
timeout = "30s"
[tools.timeouts]
web_scraper = "120s" # Slow tool
calculator = "5s" # Fast tool
- Optimize tool implementation:
// Add caching to slow tools
// Use connection pooling
// Implement early returns
💾 Memory Issues
Issue: Memory Connection Failed
Error:
MEMORY_CONNECTION: failed to connect to pgvector
Solutions:
- Verify connection string:
[memory]
provider = "pgvector"
connection_string = "postgresql://user:password@localhost:5432/dbname"
- Test connection:
psql "postgresql://user:password@localhost:5432/dbname" -c "\l"
- Check pgvector extension:
CREATE EXTENSION IF NOT EXISTS vector;
- Use fallback provider:
agent, _ := v1beta.NewBuilder("agent").
WithLLM("openai", "gpt-4").
WithMemory(
v1beta.WithMemoryProvider("memory"), // Fallback to in-memory
).
Build()
Issue: Memory Not Persisting
Symptoms:
- Agent doesn't remember previous conversations
- Session data lost
Solutions:
- Enable session-scoped memory:
agent, _ := v1beta.NewBuilder("agent").
WithLLM("openai", "gpt-4").
WithMemory(
v1beta.WithMemoryProvider("memory"),
v1beta.WithSessionScoped(),
).
Build()
- Use persistent provider:
agent, _ := v1beta.NewBuilder("agent").
WithLLM("openai", "gpt-4").
WithMemory(
v1beta.WithMemoryProvider("pgvector"),
v1beta.WithSessionScoped(),
).
Build()
- Verify session ID is set:
import "github.com/agenticgokit/agenticgokit/core"
provider := core.GetMemoryProvider("memory")
if provider != nil {
provider.SetSession("user-123") // Set session ID
}
🔀 Workflow Issues
Issue: Workflow Cycle Detected
Error:
WORKFLOW_CYCLE_DETECTED: circular dependency found
Solution:
Check workflow dependencies:
// ❌ Bad: Circular dependency
workflow, _ := v1beta.NewDAGWorkflow("pipeline",
v1beta.Step("a", agent1, "task", "b"), // depends on b
v1beta.Step("b", agent2, "task", "a"), // depends on a
)
// ✅ Good: Linear dependencies
workflow, _ := v1beta.NewDAGWorkflow("pipeline",
v1beta.Step("a", agent1, "task"),
v1beta.Step("b", agent2, "task", "a"),
v1beta.Step("c", agent3, "task", "b"),
)
Issue: Workflow Step Failed
Error:
WORKFLOW_STEP_FAILED: step 'analysis' failed
Solutions:
- Add error handling:
workflow, _ := v1beta.NewSequentialWorkflow("pipeline",
v1beta.Step("step1", agent1, "task"),
v1beta.Step("step2_optional", agent2, "task"), // Can fail
v1beta.Step("step3", agent3, "task"),
)
// Continue on non-critical failures
- Use fallback agents:
// Try primary, fallback to secondary
step := v1beta.Step("analysis", primaryAgent, "task")
// If fails, use secondaryAgent
- Add retry logic:
[workflow]
max_retries = 3
retry_delay = "1s"
🏗️ Build and Runtime Issues
Issue: Import Errors
Error:
cannot find package "github.com/agenticgokit/agenticgokit/v1beta"
Solutions:
- Update dependencies:
go get -u github.com/agenticgokit/agenticgokit/v1beta
go mod tidy
- Verify import path:
import "github.com/agenticgokit/agenticgokit/v1beta"
// Not:
// import "github.com/agenticgokit/agenticgokit/core/vnext"
- Check go.mod:
module your-app
go 1.21
require github.com/agenticgokit/agenticgokit v0.5.0
Issue: Plugin Not Loading
Error:
plugin not found: memory/pgvector
Solutions:
- Import plugin:
import (
"github.com/agenticgokit/agenticgokit/v1beta"
_ "github.com/agenticgokit/agenticgokit/plugins/memory/pgvector" // Register
)
- Verify plugin is installed:
go list -m github.com/agenticgokit/agenticgokit/plugins/memory/pgvector
- Use blank import for side effects:
import (
_ "github.com/agenticgokit/agenticgokit/plugins/llm/openai"
_ "github.com/agenticgokit/agenticgokit/plugins/memory/memory"
)
🐛 Common Error Messages
"context deadline exceeded"
Cause: Operation took too long
Solution:
// Increase timeout
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
result, err := agent.Run(ctx, query)
"API key not found"
Cause: Missing or incorrect API key
Solution:
export OPENAI_API_KEY="sk-..."
"model not found"
Cause: Invalid model name
Solution:
// Verify model name
agent, _ := v1beta.NewBuilder("agent").
WithLLM("openai", "gpt-4"). // Check spelling
Build()
"rate limit exceeded"
Cause: Too many requests
Solution: Implement retry with backoff (see LLM Provider Issues)
"connection refused"
Cause: Service not running or wrong address
Solution:
# Check if service is running
netstat -an | grep 8080
# Verify address
curl http://localhost:8080
🔍 Debugging Tips
Enable Verbose Logging
config := &v1beta.Config{
DebugMode: true,
LLM: v1beta.LLMConfig{
Provider: "openai",
Model: "gpt-4",
},
}
Inspect Agent State
import "github.com/agenticgokit/agenticgokit/v1beta"
agent, _ := v1beta.NewBuilder("agent").
WithLLM("openai", "gpt-4").
Build()
// Check configuration
config := agent.Config()
fmt.Printf("Config: %+v\n", config)
// Check capabilities
caps := agent.Capabilities()
fmt.Printf("Capabilities: %v\n", caps)
Test Components Individually
// Test LLM connection
agent, err := v1beta.NewBuilder("test").
WithLLM("openai", "gpt-4").
Build()
result, err := agent.Run(context.Background(), "Hello")
if err != nil {
fmt.Printf("LLM test failed: %v\n", err)
}
// Test tools separately
// Test memory separately
Check Error Details
import "github.com/agenticgokit/agenticgokit/v1beta"
result, err := agent.Run(ctx, input)
if err != nil {
fmt.Printf("Error: %v\n", err)
fmt.Printf("Code: %v\n", v1beta.GetErrorCode(err))
fmt.Printf("Suggestion: %v\n", v1beta.GetErrorSuggestion(err))
details := v1beta.GetErrorDetails(err)
fmt.Printf("Details: %+v\n", details)
}
📚 Getting Help
Check Documentation
- Getting Started - Basic setup
- Error Handling - Error patterns
- Performance - Optimization tips
- Configuration - All settings
Enable Debug Mode
debug_mode = true
[llm]
provider = "openai"
model = "gpt-4"
debug = true
[tools]
enabled = true
debug = true
[memory]
provider = "memory"
debug = true
Report Issues
When reporting issues, include:
- Error message and code
- Minimal reproduction code
- Configuration (redact sensitive info)
- Go version and OS
- AgenticGoKit version
Still stuck? Check GitHub Issues or join our community chat.