# SDK Automation Recipes

Production-ready automation patterns using the `clore-ai` SDK. Every recipe is copy-paste ready and battle-tested.

***

## Recipe 1: GPU Auto-Scaler

Monitor a task queue and automatically rent GPUs when demand exceeds capacity.

```python
"""
GPU Auto-Scaler
Watches a Redis queue and scales GPU workers up/down.
"""

import time
import logging
from dataclasses import dataclass, field
from typing import List

import redis
from clore_ai import CloreAI
from clore_ai.exceptions import CloreAPIError, InvalidInputError

logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(message)s")
log = logging.getLogger("autoscaler")

@dataclass
class ScalerConfig:
    """Auto-scaler configuration: marketplace filters, scaling thresholds, timing."""
    queue_name: str = "gpu_tasks"    # Redis list polled for pending tasks
    gpu_model: str = "RTX 4090"      # Marketplace GPU filter
    max_price_usd: float = 0.60      # Max hourly price to pay per server
    image: str = "cloreai/pytorch"   # Docker image for new workers
    currency: str = "bitcoin"        # Currency used to pay for orders
    min_workers: int = 0             # Never scale below this many workers
    max_workers: int = 5             # Never scale above this many workers
    scale_up_threshold: int = 10     # Queue length to trigger scale-up
    scale_down_threshold: int = 2    # Queue length to trigger scale-down
    cooldown_seconds: int = 120      # Min time between scaling actions
    check_interval: int = 30         # Seconds between checks

@dataclass
class WorkerState:
    """Tracks active GPU workers."""
    order_ids: List[int] = field(default_factory=list)  # Orders we created and still track
    last_scale_action: float = 0.0                      # time.time() of last scale up/down

def run_autoscaler(config: ScalerConfig):
    """Run the autoscaling loop forever.

    Every ``config.check_interval`` seconds, compares the Redis queue length
    against the scale thresholds and rents (or cancels) at most one GPU
    order per cycle, honouring the cooldown between scaling actions.

    Args:
        config: Thresholds, marketplace filters, and timing knobs.
    """
    client = CloreAI()
    r = redis.Redis()
    state = WorkerState()

    log.info(f"Auto-scaler started: queue={config.queue_name}, gpu={config.gpu_model}")

    while True:
        try:
            queue_len = r.llen(config.queue_name)
            active = len(state.order_ids)
            now = time.time()
            cooldown_ok = (now - state.last_scale_action) > config.cooldown_seconds

            log.info(f"Queue: {queue_len} | Workers: {active}/{config.max_workers}")

            # --- Scale UP: rent the cheapest matching server ---
            if queue_len >= config.scale_up_threshold and active < config.max_workers and cooldown_ok:
                servers = client.marketplace(
                    gpu=config.gpu_model,
                    max_price_usd=config.max_price_usd,
                )
                if servers:
                    # Cheapest first; unpriced servers sort last.
                    servers.sort(key=lambda s: s.price_usd or float("inf"))
                    best = servers[0]
                    try:
                        order = client.create_order(
                            server_id=best.id,
                            image=config.image,
                            type="on-demand",
                            currency=config.currency,
                            ports={"22": "tcp"},
                        )
                        state.order_ids.append(order.id)
                        state.last_scale_action = now
                        log.info(f"⬆️  Scaled UP: order {order.id} on server {best.id}")
                    except InvalidInputError as e:
                        log.warning(f"Cannot rent server {best.id}: {e}")
                else:
                    log.warning("No servers available matching criteria")

            # --- Scale DOWN: cancel the most recently created worker ---
            elif queue_len <= config.scale_down_threshold and active > config.min_workers and cooldown_ok:
                victim = state.order_ids.pop()
                try:
                    client.cancel_order(victim, issue="Autoscaler: low demand")
                    state.last_scale_action = now
                    log.info(f"⬇️  Scaled DOWN: cancelled order {victim}")
                except CloreAPIError as e:
                    # BUGFIX: cancellation failed, so the order is still live and
                    # billing — previously the popped id was silently dropped,
                    # leaving an untracked (orphaned) paid order. Restore it so
                    # the next cycle can retry the cancellation.
                    state.order_ids.append(victim)
                    log.warning(f"Failed to cancel order {victim}: {e}")

        except Exception as e:
            # Broad catch keeps the daemon alive through transient Redis/API errors.
            log.error(f"Autoscaler error: {e}")

        time.sleep(config.check_interval)


if __name__ == "__main__":
    # Example: keep at most 3 workers serving the "inference_tasks" queue.
    run_autoscaler(ScalerConfig(
        queue_name="inference_tasks",
        gpu_model="RTX 4090",
        max_workers=3,
    ))
```

***

## Recipe 2: Multi-GPU Orchestrator (Async)

Deploy the same workload across N servers with the async client, staggering order creation to respect the API's `create_order` cooldown.

```python
"""
Multi-GPU Orchestrator
Deploy workloads across multiple servers concurrently.
"""

import asyncio
import logging
from dataclasses import dataclass
from typing import List, Optional

from clore_ai import AsyncCloreAI
from clore_ai.exceptions import CloreAPIError

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("orchestrator")

@dataclass
class DeployResult:
    """Outcome of a single-server deployment attempt."""
    server_id: int                     # Marketplace server that was targeted
    order_id: Optional[int] = None     # Created order id (None on failure)
    pub_cluster: Optional[str] = None  # Public hostname of the instance
    error: Optional[str] = None        # Error message when success is False
    success: bool = False              # True iff the order was created

async def deploy_to_server(
    client: AsyncCloreAI,
    server_id: int,
    image: str,
    currency: str,
    ssh_password: str,
) -> DeployResult:
    """Create an on-demand order on one server and report the outcome.

    Never raises: API failures are captured in the returned DeployResult
    (success=False, error set) instead of propagating.
    """
    try:
        placed = await client.create_order(
            server_id=server_id,
            image=image,
            type="on-demand",
            currency=currency,
            ssh_password=ssh_password,
            ports={"22": "tcp", "8888": "http"},
            env={"NVIDIA_VISIBLE_DEVICES": "all"},
        )
    except CloreAPIError as exc:
        log.error(f"❌ Server {server_id}: {exc}")
        return DeployResult(server_id=server_id, error=str(exc))

    log.info(f"✅ Server {server_id}: order {placed.id} @ {placed.pub_cluster}")
    return DeployResult(
        server_id=server_id,
        order_id=placed.id,
        pub_cluster=placed.pub_cluster,
        success=True,
    )

async def orchestrate(
    gpu_model: str = "RTX 4090",
    count: int = 3,
    max_price_usd: float = 0.80,
    image: str = "cloreai/pytorch",
    currency: str = "bitcoin",
    ssh_password: str = "OrchestratorPass123",
) -> List[DeployResult]:
    """
    Find the `count` cheapest servers matching the filters and deploy the
    image to each of them.

    NOTE: deployments are issued sequentially with a 5.5s stagger (not truly
    in parallel) to respect the API's create_order cooldown.

    Returns one DeployResult per target server — possibly fewer than `count`
    if the marketplace has fewer matches, or an empty list if none.
    """
    async with AsyncCloreAI() as client:
        # Find candidate servers and keep the `count` cheapest.
        servers = await client.marketplace(
            gpu=gpu_model,
            max_price_usd=max_price_usd,
        )
        servers.sort(key=lambda s: s.price_usd or float("inf"))
        targets = servers[:count]

        if len(targets) < count:
            log.warning(f"Only {len(targets)} servers available (requested {count})")

        if not targets:
            log.error("No servers available")
            return []

        log.info(f"Deploying to {len(targets)} servers...")

        # Deploy one-by-one, sleeping 5.5s between orders to respect the
        # create_order rate limit (5s cooldown between orders).
        results = []
        for i, server in enumerate(targets):
            if i > 0:
                await asyncio.sleep(5.5)  # Respect create_order rate limit
            result = await deploy_to_server(
                client, server.id, image, currency, ssh_password
            )
            results.append(result)

        # Summary
        success_count = sum(1 for r in results if r.success)
        log.info(f"\n{'='*40}")
        log.info(f"Deployed: {success_count}/{len(targets)}")
        for r in results:
            if r.success:
                log.info(f"  ✅ Order {r.order_id} @ {r.pub_cluster}")
            else:
                log.info(f"  ❌ Server {r.server_id}: {r.error}")

        return results

async def teardown(order_ids: List[int]):
    """Best-effort cancellation of every order in `order_ids`.

    Failures are logged and skipped so one bad order doesn't block the rest.
    """
    async with AsyncCloreAI() as client:
        for order_id in order_ids:
            try:
                await client.cancel_order(order_id, issue="Orchestrator teardown")
            except CloreAPIError as exc:
                log.warning(f"Failed to cancel {order_id}: {exc}")
            else:
                log.info(f"Cancelled order {order_id}")


if __name__ == "__main__":
    # Deploy to 3 servers, then tear everything down on Enter.
    results = asyncio.run(orchestrate(
        gpu_model="RTX 4090",
        count=3,
        max_price_usd=0.80,
    ))

    # When done, teardown
    active_ids = [r.order_id for r in results if r.success and r.order_id]
    if active_ids:
        input("Press Enter to teardown all orders...")
        asyncio.run(teardown(active_ids))
```

***

## Recipe 3: Spot Bidding Bot

Monitor spot prices and automatically bid to maintain the cheapest GPU.

```python
"""
Spot Bidding Bot
Monitors spot market and adjusts bids to stay competitive.
"""

import time
import logging
from typing import Optional

from clore_ai import CloreAI
from clore_ai.exceptions import CloreAPIError

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("bidbot")

class SpotBidBot:
    """Keeps a spot order competitively priced.

    Each cycle: read the lowest competing spot offer for the server,
    undercut it by `undercut_pct`, clamp to [floor_price, max_price],
    and submit the new bid if it changed.
    """

    def __init__(
        self,
        client: "CloreAI",  # forward-ref annotation: class stays importable without the SDK
        order_id: int,
        server_id: int,
        max_price: float = 0.0001,
        undercut_pct: float = 0.05,    # Undercut competitors by 5%
        floor_price: float = 0.000001, # Never bid below this
        check_interval: int = 60,
    ):
        self.client = client
        self.order_id = order_id          # Our active spot order
        self.server_id = server_id        # Server whose spot market we watch
        self.max_price = max_price
        self.undercut_pct = undercut_pct
        self.floor_price = floor_price
        self.check_interval = check_interval
        self.current_bid: Optional[float] = None  # Last successfully submitted bid

    def get_market_price(self) -> Optional[float]:
        """Return the lowest competing spot offer price, or None.

        None means "no competition": no offers at all, only our own offer,
        or no competitor offer carrying a price.
        """
        try:
            market = self.client.spot_marketplace(self.server_id)
            if not market.offers:
                return None
            # BUGFIX: filter out None prices BEFORE taking min(). Previously,
            # when every competing offer had price=None, min() ran over an
            # empty sequence and raised ValueError — which is not caught by
            # the CloreAPIError handler below and escaped to the caller.
            prices = [
                o.price
                for o in market.offers
                if o.order_id != self.order_id and o.price is not None
            ]
            return min(prices) if prices else None
        except CloreAPIError as e:
            log.warning(f"Failed to get spot market: {e}")
            return None

    def calculate_bid(self, market_price: Optional[float]) -> float:
        """Calculate the optimal bid for a given market price.

        With no competition, bid at the floor. Otherwise undercut the market
        by `undercut_pct`, clamped to [floor_price, max_price] and rounded
        to 8 decimal places.
        """
        if market_price is None:
            # No competition — bid at floor
            return self.floor_price

        # Undercut by configured percentage
        bid = market_price * (1 - self.undercut_pct)

        # Clamp to bounds (floor first, then ceiling)
        bid = max(bid, self.floor_price)
        bid = min(bid, self.max_price)

        return round(bid, 8)

    def update_bid(self, new_price: float):
        """Submit the updated bid to the API, skipping no-op updates."""
        if new_price == self.current_bid:
            return  # No change needed

        try:
            self.client.set_spot_price(self.order_id, new_price)
            self.current_bid = new_price
            log.info(f"💰 Bid updated: {new_price:.8f}")
        except CloreAPIError as e:
            log.error(f"Failed to update bid: {e}")

    def run(self):
        """Main bot loop; runs forever, surviving transient errors."""
        log.info(f"Spot bot started: order={self.order_id}, server={self.server_id}")
        log.info(f"  Max price: {self.max_price}, floor: {self.floor_price}")

        while True:
            try:
                market = self.get_market_price()
                bid = self.calculate_bid(market)

                # FIX: explicit None check — a (theoretical) market price of
                # 0.0 is real competition, not "no competition".
                if market is not None:
                    log.info(f"Market: {market:.8f} → My bid: {bid:.8f}")
                else:
                    log.info(f"No competition → bid: {bid:.8f}")

                self.update_bid(bid)

            except Exception as e:
                # Keep the daemon alive through unexpected errors.
                log.error(f"Bot error: {e}")

            time.sleep(self.check_interval)


if __name__ == "__main__":
    client = CloreAI()

    # Runs forever; Ctrl-C to stop. Fill in your own order/server ids.
    bot = SpotBidBot(
        client=client,
        order_id=39,       # Your active spot order
        server_id=6,       # The server ID
        max_price=0.0001,
        undercut_pct=0.05,
        check_interval=60,
    )
    bot.run()
```

***

## Recipe 4: Server Health Checker with Auto-Recovery

Monitor your active orders and automatically replace unhealthy instances.

```python
"""
Health Checker with Auto-Recovery
Monitors GPU instances and replaces unresponsive ones.
"""

import time
import subprocess
import logging
from typing import Optional, Dict

from clore_ai import CloreAI
from clore_ai.exceptions import CloreAPIError

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("healthcheck")

class HealthChecker:
    """Monitors active orders over SSH and replaces repeatedly-failing ones.

    An instance is "unhealthy" when `nvidia-smi` cannot be run on it via
    SSH. After `max_failures` consecutive failures the order is cancelled
    and a replacement is rented.
    """

    def __init__(
        self,
        client: CloreAI,
        image: str = "cloreai/ubuntu22.04-cuda12",
        currency: str = "bitcoin",
        ssh_password: str = "HealthCheck123",
        max_failures: int = 3,
        check_interval: int = 120,
    ):
        self.client = client
        self.image = image                      # Image used for replacement orders
        self.currency = currency
        self.ssh_password = ssh_password
        self.max_failures = max_failures        # Consecutive failures before replacing
        self.check_interval = check_interval    # Seconds between sweeps
        self.failure_counts: Dict[int, int] = {}  # order_id -> consecutive failures

    def check_order_health(self, order_id: int, host: str, port: int = 22) -> bool:
        """Return True if `nvidia-smi` succeeds on the instance via SSH.

        Any failure mode — SSH unreachable, timeout, missing ssh binary,
        nvidia-smi error or empty output — is treated as unhealthy.
        """
        try:
            result = subprocess.run(
                [
                    "ssh",
                    "-o", "StrictHostKeyChecking=no",
                    "-o", "ConnectTimeout=10",
                    "-p", str(port),
                    f"root@{host}",
                    "nvidia-smi --query-gpu=name --format=csv,noheader",
                ],
                capture_output=True,
                text=True,
                timeout=30,
            )
            if result.returncode == 0 and result.stdout.strip():
                log.info(f"✅ Order {order_id}: healthy ({result.stdout.strip()})")
                return True
            log.warning(f"⚠️  Order {order_id}: nvidia-smi failed")
            return False
        except Exception as e:
            # FIX: was `except (subprocess.TimeoutExpired, Exception)` — the
            # tuple was redundant since Exception already covers TimeoutExpired
            # (and e.g. FileNotFoundError when the ssh binary is missing).
            log.warning(f"⚠️  Order {order_id}: unreachable ({e})")
            return False

    def replace_order(self, order_id: int) -> Optional[int]:
        """Cancel an unhealthy order and rent a replacement.

        Returns the new order id, or None on failure. Note: the old order is
        cancelled BEFORE a replacement is found, so a failed search leaves
        you one worker short (logged as an error).
        """
        try:
            # Cancel the bad order
            self.client.cancel_order(order_id, issue="Health check: unresponsive")
            log.info(f"Cancelled unhealthy order {order_id}")

            # Find a replacement server.
            # NOTE(review): no GPU-model filter here — the replacement may be
            # a different GPU than the original; add filters if that matters.
            servers = self.client.marketplace(available_only=True)
            if not servers:
                log.error("No replacement servers available")
                return None

            # Cheapest first; unpriced servers sort last.
            servers.sort(key=lambda s: s.price_usd or float("inf"))
            best = servers[0]

            # Create replacement
            new_order = self.client.create_order(
                server_id=best.id,
                image=self.image,
                type="on-demand",
                currency=self.currency,
                ssh_password=self.ssh_password,
                ports={"22": "tcp"},
            )
            log.info(f"🔄 Replacement: order {new_order.id} on server {best.id}")
            return new_order.id

        except CloreAPIError as e:
            log.error(f"Replacement failed: {e}")
            return None

    def run(self):
        """Main monitoring loop; sweeps all orders every `check_interval` seconds."""
        log.info("Health checker started")

        while True:
            try:
                orders = self.client.my_orders()
                for order in orders:
                    if not order.pub_cluster:
                        continue  # Still provisioning

                    # Resolve the public SSH port mapped to container port 22.
                    # (tcp_ports keys appear to be strings — confirmed by usage here.)
                    ssh_port = 22
                    if order.tcp_ports and "22" in order.tcp_ports:
                        ssh_port = order.tcp_ports["22"]

                    healthy = self.check_order_health(
                        order.id, order.pub_cluster, ssh_port
                    )

                    if healthy:
                        # Reset the consecutive-failure streak.
                        self.failure_counts[order.id] = 0
                    else:
                        self.failure_counts[order.id] = self.failure_counts.get(order.id, 0) + 1
                        failures = self.failure_counts[order.id]
                        log.warning(
                            f"Order {order.id}: {failures}/{self.max_failures} failures"
                        )

                        if failures >= self.max_failures:
                            log.error(f"Order {order.id}: replacing (too many failures)")
                            new_id = self.replace_order(order.id)
                            if new_id:
                                # FIX: pop() instead of del — safe even if the
                                # key was somehow already removed.
                                self.failure_counts.pop(order.id, None)
                                self.failure_counts[new_id] = 0

            except Exception as e:
                # Keep the daemon alive through transient API errors.
                log.error(f"Health check error: {e}")

            time.sleep(self.check_interval)


if __name__ == "__main__":
    # Replace an instance after 3 consecutive failed SSH/nvidia-smi probes.
    client = CloreAI()
    checker = HealthChecker(client, max_failures=3, check_interval=120)
    checker.run()
```

***

## Recipe 5: Budget-Aware Cost Tracker

Track spending and stop renting when you hit a budget limit.

```python
"""
Cost Tracker with Budget Alerts
Monitors spending and auto-cancels orders when budget is exceeded.
"""

import time
import json
import logging
from pathlib import Path
from datetime import datetime, timedelta
from typing import List, Dict, Optional

from clore_ai import CloreAI
from clore_ai.exceptions import CloreAPIError

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("costtracker")

class CostTracker:
    """Tracks GPU spend against daily/monthly budgets, persisted to JSON.

    Timestamps are naive-UTC ISO-8601 strings; since every entry uses the
    same format, lexicographic comparison is equivalent to chronological
    comparison for the rolling-window filters below.
    """

    def __init__(
        self,
        client: "CloreAI",  # forward-ref annotation: class stays importable without the SDK
        daily_budget_usd: float = 10.0,
        monthly_budget_usd: float = 200.0,
        state_file: str = "cost_state.json",
        alert_callback=None,
    ):
        """
        Args:
            client: Authenticated SDK client (used only by emergency_shutdown).
            daily_budget_usd: Rolling 24-hour spending cap.
            monthly_budget_usd: Rolling 30-day spending cap.
            state_file: Path of the JSON persistence file.
            alert_callback: Called with a message string on budget alerts;
                defaults to log.warning.
        """
        self.client = client
        self.daily_budget_usd = daily_budget_usd
        self.monthly_budget_usd = monthly_budget_usd
        self.state_file = Path(state_file)
        self.alert_callback = alert_callback or (lambda msg: log.warning(msg))
        self.state = self._load_state()

    def _load_state(self) -> Dict:
        """Load persisted state, or return a fresh empty state."""
        if self.state_file.exists():
            return json.loads(self.state_file.read_text())
        return {"entries": [], "total_usd": 0.0}

    def _save_state(self):
        """Persist state atomically.

        BUGFIX: writing the state file directly could leave it truncated /
        corrupt if the process died mid-write; write a temp file next to it
        and rename (Path.replace is atomic on both POSIX and Windows).
        """
        tmp = self.state_file.with_suffix(self.state_file.suffix + ".tmp")
        tmp.write_text(json.dumps(self.state, indent=2))
        tmp.replace(self.state_file)

    def record(self, order_id: int, hours: float, price_usd_per_hour: float, gpu: str):
        """Record a cost entry (hours × hourly price) and persist it."""
        cost = hours * price_usd_per_hour
        entry = {
            "timestamp": datetime.utcnow().isoformat(),
            "order_id": order_id,
            "hours": hours,
            "cost_usd": cost,
            "gpu": gpu,
        }
        self.state["entries"].append(entry)
        self.state["total_usd"] += cost
        self._save_state()
        log.info(f"Recorded: order {order_id}, {hours:.2f}h, ${cost:.4f}")

    def _spend_since(self, window: timedelta) -> float:
        """Sum of entry costs newer than now - window (ISO strings compare lexicographically)."""
        cutoff = (datetime.utcnow() - window).isoformat()
        return sum(
            e["cost_usd"] for e in self.state["entries"]
            if e["timestamp"] > cutoff
        )

    def get_daily_spend(self) -> float:
        """Spend over the trailing 24 hours."""
        return self._spend_since(timedelta(days=1))

    def get_monthly_spend(self) -> float:
        """Spend over the trailing 30 days."""
        return self._spend_since(timedelta(days=30))

    def check_budget(self) -> bool:
        """Check spending against budgets; returns False when over budget.

        Also warns (without failing) at 80% of the daily budget. Alerts
        fire on every call while the condition holds.
        """
        daily = self.get_daily_spend()
        monthly = self.get_monthly_spend()

        if daily >= self.daily_budget_usd:
            self.alert_callback(
                f"🚨 DAILY BUDGET EXCEEDED: ${daily:.2f} / ${self.daily_budget_usd:.2f}"
            )
            return False

        if monthly >= self.monthly_budget_usd:
            self.alert_callback(
                f"🚨 MONTHLY BUDGET EXCEEDED: ${monthly:.2f} / ${self.monthly_budget_usd:.2f}"
            )
            return False

        if daily >= self.daily_budget_usd * 0.8:
            self.alert_callback(
                f"⚠️  Daily spend at 80%: ${daily:.2f} / ${self.daily_budget_usd:.2f}"
            )

        return True

    def emergency_shutdown(self):
        """Cancel ALL active orders on the account."""
        log.warning("🛑 EMERGENCY SHUTDOWN — cancelling all orders")
        try:
            orders = self.client.my_orders()
            for order in orders:
                self.client.cancel_order(order.id, issue="Budget exceeded")
                log.info(f"Cancelled order {order.id}")
        except CloreAPIError as e:
            log.error(f"Shutdown error: {e}")

    def report(self) -> str:
        """Human-readable spend summary (daily / monthly / lifetime)."""
        daily = self.get_daily_spend()
        monthly = self.get_monthly_spend()
        return (
            f"📊 Cost Report\n"
            f"  Today:  ${daily:.2f} / ${self.daily_budget_usd:.2f}\n"
            f"  Month:  ${monthly:.2f} / ${self.monthly_budget_usd:.2f}\n"
            f"  Total:  ${self.state['total_usd']:.2f}"
        )


if __name__ == "__main__":
    client = CloreAI()
    tracker = CostTracker(
        client,
        daily_budget_usd=5.0,
        monthly_budget_usd=100.0,
    )

    # Check current spend
    print(tracker.report())

    # Enforce budget before creating orders
    if tracker.check_budget():
        print("✅ Budget OK — safe to create orders")
    else:
        print("❌ Over budget — shutting down")
        # Cancels every active order on the account.
        tracker.emergency_shutdown()
```

***

## Recipe 6: End-to-End Training Pipeline

Full lifecycle: find GPU → rent → deploy → train → download results → cancel.

```python
"""
Training Pipeline
Full cycle: rent → deploy → train → collect results → cleanup.
"""

import time
import subprocess
import logging
from dataclasses import dataclass
from typing import Optional

from clore_ai import CloreAI
from clore_ai.exceptions import CloreAPIError

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("pipeline")

@dataclass
class PipelineConfig:
    """Settings for one end-to-end training run."""
    gpu_model: str = "RTX 4090"     # Marketplace GPU filter
    max_price_usd: float = 0.80     # Max hourly price to pay
    image: str = "pytorch/pytorch:2.5.1-cuda12.4-cudnn9-runtime"  # Training image
    currency: str = "bitcoin"       # Payment currency
    ssh_password: str = "TrainPipe123"  # Root password set on the instance
    setup_script: str = "pip install wandb transformers datasets"  # Runs before training
    training_script: str = ""  # Set by caller
    output_path: str = "/workspace/results"  # Remote directory to download
    local_output: str = "./results"          # Local destination for results
    timeout_minutes: int = 120               # Max training wall time

def wait_for_ssh(host: str, port: int, timeout: int = 120) -> bool:
    """Block until SSH on host:port answers, or `timeout` seconds elapse.

    Probes every 5 seconds by running a trivial remote command. Returns
    True on the first successful probe, False once the deadline passes.
    """
    probe = [
        "ssh", "-o", "StrictHostKeyChecking=no", "-o", "ConnectTimeout=5",
        "-p", str(port), f"root@{host}", "echo ok",
    ]
    stop_at = time.time() + timeout
    while time.time() < stop_at:
        try:
            reachable = subprocess.run(
                probe, capture_output=True, text=True, timeout=15,
            ).returncode == 0
        except Exception:
            reachable = False
        if reachable:
            return True
        time.sleep(5)
    return False

def run_pipeline(config: PipelineConfig) -> bool:
    """Execute the full training pipeline.

    Steps: find the cheapest matching server → create an on-demand order →
    wait for SSH → run setup → run the training script → scp results to
    `config.local_output`. The order is always cancelled in `finally`,
    whether or not training succeeded.

    Args:
        config: Server selection, image, scripts, paths, and timeouts.

    Returns:
        True if training exited 0 and results were downloaded; False on
        any failure (the order is still cancelled).
    """
    client = CloreAI()
    order_id: Optional[int] = None  # Set once the order exists, so `finally` can cancel it

    try:
        # 1. Find best server
        log.info(f"🔍 Searching for {config.gpu_model} under ${config.max_price_usd}/h...")
        servers = client.marketplace(
            gpu=config.gpu_model,
            max_price_usd=config.max_price_usd,
        )
        if not servers:
            log.error("No servers found")
            return False

        # Cheapest first; unpriced servers sort last.
        servers.sort(key=lambda s: s.price_usd or float("inf"))
        best = servers[0]
        log.info(f"Found: server {best.id} ({best.gpu_count}x {best.gpu_model}) — ${best.price_usd:.4f}/h")

        # 2. Create order
        log.info("🚀 Creating order...")
        order = client.create_order(
            server_id=best.id,
            image=config.image,
            type="on-demand",
            currency=config.currency,
            ssh_password=config.ssh_password,
            ports={"22": "tcp"},
            env={"NVIDIA_VISIBLE_DEVICES": "all"},
        )
        order_id = order.id
        log.info(f"Order {order_id} created")

        # 3. Wait for instance to be ready
        log.info("⏳ Waiting for instance...")
        time.sleep(15)  # Give it a moment

        # Refresh order to get connection info.
        # NOTE(review): a single refresh after a fixed 15s sleep — if
        # provisioning takes longer, the pipeline aborts; consider polling.
        orders = client.my_orders()
        active = next((o for o in orders if o.id == order_id), None)
        if not active or not active.pub_cluster:
            log.error("Order did not become active")
            return False

        host = active.pub_cluster
        port = 22
        # tcp_ports maps container port -> public port (keys are strings here)
        if active.tcp_ports and "22" in active.tcp_ports:
            port = active.tcp_ports["22"]

        log.info(f"Instance: {host}:{port}")
        if not wait_for_ssh(host, port):
            log.error("SSH not reachable within timeout")
            return False

        ssh_base = ["ssh", "-o", "StrictHostKeyChecking=no", "-p", str(port), f"root@{host}"]

        # 4. Setup (install extra dependencies inside the container)
        log.info("📦 Running setup...")
        subprocess.run(ssh_base + [config.setup_script], check=True, timeout=300)

        # 5. Train (blocks up to timeout_minutes)
        log.info("🏋️ Starting training...")
        result = subprocess.run(
            ssh_base + [config.training_script],
            capture_output=True,
            text=True,
            timeout=config.timeout_minutes * 60,
        )
        log.info(f"Training output:\n{result.stdout[-2000:]}")  # Last 2000 chars

        if result.returncode != 0:
            log.error(f"Training failed:\n{result.stderr[-1000:]}")
            return False

        # 6. Download results (scp uses uppercase -P for the port)
        log.info("📥 Downloading results...")
        subprocess.run(
            ["scp", "-o", "StrictHostKeyChecking=no", "-r",
             "-P", str(port), f"root@{host}:{config.output_path}", config.local_output],
            check=True,
            timeout=300,
        )
        log.info(f"Results saved to {config.local_output}")

        return True

    except (CloreAPIError, subprocess.CalledProcessError, subprocess.TimeoutExpired) as e:
        log.error(f"Pipeline error: {e}")
        return False

    finally:
        # 7. Cleanup — always cancel so billing stops even on failure
        if order_id:
            log.info(f"🧹 Cancelling order {order_id}...")
            try:
                client.cancel_order(order_id, issue="Pipeline complete")
            except CloreAPIError:
                pass


if __name__ == "__main__":
    # Example run: 10-epoch training job, results copied to ./checkpoints.
    config = PipelineConfig(
        gpu_model="RTX 4090",
        max_price_usd=0.50,
        training_script="cd /workspace && python train.py --epochs 10 --batch-size 32",
        output_path="/workspace/checkpoints",
        local_output="./checkpoints",
        timeout_minutes=60,
    )

    success = run_pipeline(config)
    print(f"\n{'✅ Pipeline succeeded' if success else '❌ Pipeline failed'}")
```

***

## Tips for All Recipes

### 1. Always use `try/finally` for cleanup

```python
order_id = None
try:
    order = client.create_order(...)
    order_id = order.id
    # ... do work ...
finally:
    if order_id:
        client.cancel_order(order_id)
```

### 2. Respect the rate limiter

The SDK handles rate limiting automatically. Don't add your own `time.sleep(1)` between SDK calls — the built-in `RateLimiter` does this for you.

### 3. Use async for concurrent operations

```python
# BAD: Sequential (slow)
for server_id in server_ids:
    client.create_order(server_id=server_id, ...)

# GOOD: Async client, sequential awaits (create_order cooldown is enforced)
async with AsyncCloreAI() as client:
    for server_id in server_ids:
        await client.create_order(server_id=server_id, ...)
        # 5s cooldown is automatic
```

### 4. Handle transient errors gracefully

```python
from clore_ai.exceptions import RateLimitError, CloreAPIError

try:
    result = client.marketplace()
except RateLimitError:
    # SDK already retried max_retries times — back off longer
    time.sleep(30)
except CloreAPIError as e:
    if e.code == 1:  # DB error — transient
        time.sleep(10)
    else:
        raise
```

***

## See Also

* [SDK API Reference](https://docs.clore.ai/dev/reference/python-sdk) — complete method documentation
* [SDK Quick Start](https://docs.clore.ai/dev/getting-started/python-sdk-quickstart) — getting started tutorial
* [CI/CD with clore-ai SDK](https://docs.clore.ai/dev/devops-and-automation/cicd-clore-sdk) — pipeline integration
* [Cost Optimization Strategies](https://docs.clore.ai/dev/devops-and-automation/cost-optimization) — saving money on GPU rental
