Output & Scripting

Master machine-readable output and automation scripting.

📊 Output Formats

JSON Output (Machine-Readable)

from glaip_sdk import Client

client = Client()
agent = client.create_agent(
    name="data-agent",
    instruction="Provide structured data in JSON format."
)

# Get JSON output
response = agent.run("List 5 programming languages", renderer="json")
print(response)  # JSON formatted output

Markdown Output (Documentation)

# Get markdown output
response = agent.run("Create a table of planets", renderer="markdown")
print(response)  # Markdown formatted output

Plain Text Output (Simple)

# Get plain text output
response = agent.run("Write a simple story", renderer="plain")
print(response)  # Plain text output

In the CLI, use --view json for scripting and automation and --view md for documentation generation; these correspond to the renderer values shown above.

🔄 Scripting and Automation

Basic Scripting

#!/usr/bin/env python3
"""Automated agent runner script."""

from glaip_sdk import Client
import json

def run_agent_analysis():
    client = Client()

    # Create agent
    agent = client.create_agent(
        name="analysis-agent",
        instruction="Analyze data and provide insights."
    )

    try:
        # Run analysis
        response = agent.run(
            "Analyze the current market trends",
            renderer="json"
        )

        # Parse JSON response (assumes the agent returns a 'summary' field)
        data = json.loads(response)
        print(f"Analysis complete: {data['summary']}")

        return data

    finally:
        # Clean up
        agent.delete()

if __name__ == "__main__":
    result = run_agent_analysis()

Batch Processing

from glaip_sdk import Client
import json

def batch_agent_analysis(topics):
    """Run agent analysis on multiple topics."""
    client = Client()

    # Create agent
    agent = client.create_agent(
        name="batch-analyzer",
        instruction="Analyze topics and provide structured insights."
    )

    results = []

    try:
        for topic in topics:
            print(f"Analyzing: {topic}")

            response = agent.run(
                f"Analyze the topic: {topic}",
                renderer="json"
            )

            # Parse and store result
            data = json.loads(response)
            results.append({
                "topic": topic,
                "analysis": data
            })

    finally:
        agent.delete()

    return results

# Run batch analysis
topics = ["AI", "Machine Learning", "Data Science"]
results = batch_agent_analysis(topics)

# Save results
with open("batch_analysis.json", "w") as f:
    json.dump(results, f, indent=2)

🔧 Advanced Scripting

Error Handling and Retries

import time
import httpx
from glaip_sdk import Client

def run_with_retry(client, agent_id, message, max_retries=3):
    """Run agent with retry logic."""
    for attempt in range(max_retries):
        try:
            response = client.run_agent(
                agent_id,
                message,
                renderer="json"
            )
            return response

        except (httpx.HTTPStatusError, httpx.ConnectError) as e:
            print(f"Attempt {attempt + 1} failed: {e}")
            if attempt < max_retries - 1:
                time.sleep(2 ** attempt)  # Exponential backoff
            else:
                raise

    raise Exception("Max retries exceeded")

# Usage
client = Client()
try:
    response = run_with_retry(client, "agent-id", "Analyze data")  # replace "agent-id" with a real agent ID
    print("Success:", response)
except Exception as e:
    print("Failed after retries:", e)

Data Processing and Filtering

import json

def process_agent_responses(responses):
    """Process and filter agent responses."""
    processed = []

    for response in responses:
        try:
            data = json.loads(response)

            # Filter and transform data
            if data.get("confidence", 0) > 0.8:
                processed.append({
                    "topic": data.get("topic"),
                    "summary": data.get("summary"),
                    "confidence": data.get("confidence")
                })

        except json.JSONDecodeError:
            print(f"Invalid JSON response: {response}")

    return processed

# Alternative using jq-like filtering
def filter_with_jq(data, jq_filter):
    """Apply jq-like filtering to JSON data."""
    try:
        import jq
        return jq.compile(jq_filter).input(data).first()
    except ImportError:
        print("jq library not available. Install with: pip install jq")
        return data

# Usage (placeholder strings; real agent responses would be JSON)
responses = ["response1", "response2", "response3"]
processed = process_agent_responses(responses)

# Filter with jq
filtered = filter_with_jq(processed, ".[] | select(.confidence > 0.8)")

Dependencies: The example above uses the Python jq bindings (pip install jq). If you also want the jq command-line JSON processor for shell-based filtering and formatting, install it via your package manager: apt install jq (Ubuntu), brew install jq (macOS), or choco install jq (Windows).

📊 CI/CD Integration

GitHub Actions

name: Agent Analysis

on:
  push:
    branches: [main]
  schedule:
    - cron: '0 2 * * *'  # Daily at 2 AM

jobs:
  analyze:
    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v4

    - name: Set up Python
      uses: actions/setup-python@v4
      with:
        python-version: '3.11'

    - name: Install dependencies
      run: |
        pip install glaip-sdk jq

    - name: Run agent analysis
      env:
        AIP_API_URL: ${{ secrets.AIP_API_URL }}
        AIP_API_KEY: ${{ secrets.AIP_API_KEY }}
      run: |
        python scripts/agent_analysis.py

    - name: Upload results
      uses: actions/upload-artifact@v4
      with:
        name: analysis-results
        path: results/

Docker Integration

FROM python:3.11-slim

WORKDIR /app

# Install dependencies
COPY requirements.txt .
RUN pip install -r requirements.txt

# Copy scripts
COPY scripts/ .

# Set environment variables
ENV AIP_API_URL=""
ENV AIP_API_KEY=""

# Run analysis
CMD ["python", "agent_analysis.py"]

# docker-compose.yml
version: '3.8'
services:
  agent-analyzer:
    build: .
    environment:
      - AIP_API_URL=${AIP_API_URL}
      - AIP_API_KEY=${AIP_API_KEY}
    volumes:
      - ./data:/app/data
      - ./results:/app/results

πŸ” Monitoring and Logging

Structured Logging

import logging
import json
from datetime import datetime

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

def log_agent_operation(operation, agent_id, input_data, output_data, duration_ms=None):
    """Log agent operations in structured format."""
    log_entry = {
        "timestamp": datetime.utcnow().isoformat(),
        "operation": operation,
        "agent_id": agent_id,
        "input": input_data,
        "output": output_data,
        "duration_ms": duration_ms
    }

    logger.info(json.dumps(log_entry))

# Usage (assumes an existing agent, as in the earlier examples)
try:
    start_time = datetime.utcnow()

    response = agent.run("Analyze data", renderer="json")

    duration = (datetime.utcnow() - start_time).total_seconds() * 1000

    log_agent_operation(
        "run",
        agent.id,
        "Analyze data",
        response,
        duration_ms=duration
    )

except Exception as e:
    logger.error(f"Agent operation failed: {e}")

🎯 Best Practices

Output Format Selection

  • JSON: Use for scripting, automation, and data processing

  • Markdown: Use for documentation and human-readable reports

  • Plain Text: Use for simple text processing and logging

  • Auto: Let the SDK choose the best format for your environment (see the format-selection sketch after this list)
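
A minimal sketch of picking a renderer programmatically, assuming the renderer values shown above ("json", "markdown", "plain") and an existing agent; the audience mapping and helper name are illustrative:

import json

def run_for_audience(agent, prompt, audience):
    """Pick a renderer based on who consumes the output (illustrative mapping)."""
    renderer = {
        "automation": "json",   # machine-readable, parsed downstream
        "docs": "markdown",     # human-readable reports
        "logs": "plain",        # simple text for log files
    }.get(audience, "json")

    response = agent.run(prompt, renderer=renderer)

    # Only the JSON renderer output is parsed; other formats are returned as text
    return json.loads(response) if renderer == "json" else response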

Scripting Guidelines

  • Error Handling: Always implement proper error handling and retries

  • Resource Cleanup: Clean up resources in finally blocks or cleanup functions

  • Logging: Use structured logging for monitoring and debugging

  • Configuration: Use environment variables for sensitive configuration (see the sketch after this list)
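
A small sketch of the configuration guideline, assuming the SDK reads AIP_API_URL and AIP_API_KEY from the environment as in the CI/CD examples above; the fail-fast helper is illustrative:

import os
import sys
from glaip_sdk import Client

REQUIRED_VARS = ("AIP_API_URL", "AIP_API_KEY")

def make_client():
    """Fail fast when required configuration is missing, then build a client."""
    missing = [name for name in REQUIRED_VARS if not os.getenv(name)]
    if missing:
        sys.exit(f"Missing environment variables: {', '.join(missing)}")
    # The client is assumed to pick up its configuration from the environment
    return Client()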

Performance Optimization

  • Batch Operations: Process multiple items together when possible

  • Caching: Cache results when appropriate

  • Connection Reuse: Reuse client instances (see the caching sketch after this list)

  • Timeout Management: Set appropriate timeouts for different operations
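
A sketch of connection reuse and caching, assuming an existing agent created as in the earlier examples; the in-memory cache keyed by prompt is illustrative and has no expiry:

import json

class CachedRunner:
    """Reuse a single agent across calls and cache identical prompts in memory."""

    def __init__(self, agent):
        self.agent = agent
        self._cache = {}

    def run(self, prompt):
        if prompt not in self._cache:
            response = self.agent.run(prompt, renderer="json")
            self._cache[prompt] = json.loads(response)
        return self._cache[prompt]

# Usage
# runner = CachedRunner(agent)
# first = runner.run("Analyze the topic: AI")   # calls the API
# again = runner.run("Analyze the topic: AI")   # served from the in-memory cache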

🚀 Next Steps

  • Build Agents: Create agents with custom tools

  • Connect Services: Integrate MCPs

  • Handle Files: Learn file uploads

  • Master Streaming: Use streaming and renderers

💡 Pro Tips

  • Start with JSON: Use JSON output for all automation and scripting

  • Implement Retries: Add retry logic for production reliability

  • Use Structured Logging: Log operations in machine-readable format

  • Monitor Performance: Track response times and success rates (see the sketch after this list)

  • Handle Errors Gracefully: Implement proper error handling and recovery

  • Test Thoroughly: Test scripts in development before production use
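
A minimal sketch of tracking response times and success rates around agent.run, assuming an existing agent; the counters and helper class are illustrative:

import time

class RunStats:
    """Track success rate and latency for agent runs."""

    def __init__(self):
        self.successes = 0
        self.failures = 0
        self.latencies_ms = []

    def timed_run(self, agent, prompt, renderer="json"):
        start = time.perf_counter()
        try:
            response = agent.run(prompt, renderer=renderer)
            self.successes += 1
            return response
        except Exception:
            self.failures += 1
            raise
        finally:
            self.latencies_ms.append((time.perf_counter() - start) * 1000)

    def summary(self):
        total = self.successes + self.failures
        return {
            "success_rate": self.successes / total if total else None,
            "avg_latency_ms": sum(self.latencies_ms) / total if total else None,
        }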

Ready to automate with agents? Start with simple scripts and gradually build more complex automation workflows!