# API 集成

> 💡 **推荐：** 使用官方 [clore-ai Python SDK](https://docs.clore.ai/guides/guides_v2-zh/gao-ji/python-sdk) 而不是原始的 HTTP 请求来管理 Clore.ai 服务器和订单。内置速率限制、重试、类型安全和异步支持。

将运行在 CLORE.AI 上的 AI 模型集成到您的应用中。

{% hint style="success" %}
在 [CLORE.AI 市场](https://clore.ai/marketplace) 上部署 API 服务器。
{% endhint %}

## 快速开始

CLORE.AI 上的大多数 AI 服务都提供与 OpenAI 兼容的 API，只需替换基础 URL 即可使用。

```python
from openai import OpenAI

# Point the standard OpenAI client at your CLORE.AI-hosted endpoint.
client = OpenAI(
    base_url="http://<your-clore-server>:8000/v1",
    api_key="not-needed"  # most self-hosted servers do not require a key
)

response = client.chat.completions.create(
    model="meta-llama/Llama-3.1-8B-Instruct",
    messages=[{"role": "user", "content": "Hello!"}]
)
print(response.choices[0].message.content)
```

***

## LLM API

### vLLM（与 OpenAI 兼容）

**服务器设置：**

```bash
# Serve the model over an OpenAI-compatible HTTP API on port 8000
python -m vllm.entrypoints.openai.api_server \
    --model meta-llama/Llama-3.1-8B-Instruct \
    --host 0.0.0.0 --port 8000
```

**Python 客户端：**

```python
from openai import OpenAI

client = OpenAI(base_url="http://server:8000/v1", api_key="dummy")

# Chat completion (blocking: full reply returned at once)
response = client.chat.completions.create(
    model="meta-llama/Llama-3.1-8B-Instruct",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write a poem about coding"}
    ],
    temperature=0.7,
    max_tokens=500
)
print(response.choices[0].message.content)

# Streaming: print each token chunk as it arrives
stream = client.chat.completions.create(
    model="meta-llama/Llama-3.1-8B-Instruct",
    messages=[{"role": "user", "content": "Tell me a story"}],
    stream=True
)
for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="", flush=True)
```

**Node.js 客户端：**

```javascript
import OpenAI from 'openai';

// Reuse the official OpenAI client against a self-hosted vLLM endpoint.
const client = new OpenAI({
    baseURL: 'http://server:8000/v1',
    apiKey: 'dummy'
});

// Single-shot chat completion; resolves to the assistant's reply text.
async function chat(message) {
    const response = await client.chat.completions.create({
        model: 'meta-llama/Llama-3.1-8B-Instruct',
        messages: [{ role: 'user', content: message }]
    });
    return response.choices[0].message.content;
}

// Streaming: write each token chunk to stdout as it arrives
async function streamChat(message) {
    const stream = await client.chat.completions.create({
        model: 'meta-llama/Llama-3.1-8B-Instruct',
        messages: [{ role: 'user', content: message }],
        stream: true
    });

    for await (const chunk of stream) {
        process.stdout.write(chunk.choices[0]?.delta?.content || '');
    }
}
```

**cURL：**

```bash
# Minimal OpenAI-compatible chat request against the vLLM server
curl http://server:8000/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
        "model": "meta-llama/Llama-3.1-8B-Instruct",
        "messages": [{"role": "user", "content": "Hello!"}]
    }'
```

### Ollama API

**Python：**

```python
import json

import requests

# Generate: single prompt in, single completion out
response = requests.post('http://server:11434/api/generate', json={
    'model': 'llama3.2',
    'prompt': 'Why is the sky blue?',
    'stream': False
})
print(response.json()['response'])

# Chat: multi-turn message list
response = requests.post('http://server:11434/api/chat', json={
    'model': 'llama3.2',
    'messages': [
        {'role': 'user', 'content': 'Hello!'}
    ],
    'stream': False
})
print(response.json()['message']['content'])

# Streaming: Ollama emits one JSON object per line
response = requests.post('http://server:11434/api/chat', json={
    'model': 'llama3.2',
    'messages': [{'role': 'user', 'content': 'Tell me a story'}],
    'stream': True
}, stream=True)

for line in response.iter_lines():
    if line:
        # json was missing from the original example's imports — required here
        data = json.loads(line)
        print(data['message']['content'], end='', flush=True)
```

**Ollama 也支持 OpenAI 格式：**

```python
from openai import OpenAI

# Ollama serves an OpenAI-compatible API under /v1; the api_key value is a
# placeholder required by the client library, not checked by Ollama.
client = OpenAI(base_url='http://server:11434/v1', api_key='ollama')
# Use the same code as in the vLLM examples above
```

### TGI API

**Python：**

```python
import json

import requests

# Generate: single request, full completion in the response body
response = requests.post('http://server:8080/generate', json={
    'inputs': 'What is machine learning?',
    'parameters': {
        'max_new_tokens': 200,
        'temperature': 0.7,
        'do_sample': True
    }
})
print(response.json()['generated_text'])

# Streaming: server-sent events, each line is "data:{json}"
response = requests.post('http://server:8080/generate_stream', json={
    'inputs': 'Explain quantum computing',
    'parameters': {'max_new_tokens': 500}
}, stream=True)

for line in response.iter_lines():
    if line:
        # Strip only the leading "data:" marker; a blanket .replace() would
        # also corrupt any "data:" occurring inside the generated text.
        payload = line.decode('utf-8').removeprefix('data:')
        data = json.loads(payload)
        print(data.get('token', {}).get('text', ''), end='', flush=True)
```

***

## 图像生成 API

### Stable Diffusion WebUI API

**启用 API：** 在启动命令中添加 `--api` 参数。

**Python：**

```python
import requests
import base64
from PIL import Image
from io import BytesIO

def txt2img(prompt, negative_prompt="", steps=20, width=512, height=512):
    """Generate an image from text via the WebUI REST API; returns a PIL Image."""
    response = requests.post('http://server:7860/sdapi/v1/txt2img', json={
        'prompt': prompt,
        'negative_prompt': negative_prompt,
        'steps': steps,
        'width': width,
        'height': height,
        'sampler_name': 'DPM++ 2M Karras',
        'cfg_scale': 7
    })

    # Decode the base64-encoded image (the API returns a list of images)
    image_data = base64.b64decode(response.json()['images'][0])
    return Image.open(BytesIO(image_data))

# Generate
image = txt2img(
    prompt="A beautiful sunset over mountains, photorealistic, 8k",
    negative_prompt="blurry, low quality"
)
image.save("output.png")

# img2img: transform an existing image, guided by a prompt
def img2img(prompt, image_path, denoising=0.5):
    """Run img2img on image_path; higher denoising_strength alters the input more."""
    with open(image_path, 'rb') as f:
        image_b64 = base64.b64encode(f.read()).decode()

    response = requests.post('http://server:7860/sdapi/v1/img2img', json={
        'prompt': prompt,
        'init_images': [image_b64],
        'denoising_strength': denoising,
        'steps': 30
    })

    image_data = base64.b64decode(response.json()['images'][0])
    return Image.open(BytesIO(image_data))
```

**Node.js：**

```javascript
const axios = require('axios');
const fs = require('fs');

// Generate an image via the WebUI REST API and save it as output.png.
async function txt2img(prompt) {
    const response = await axios.post('http://server:7860/sdapi/v1/txt2img', {
        prompt: prompt,
        steps: 20,
        width: 512,
        height: 512
    });

    // Images come back base64-encoded
    const imageBuffer = Buffer.from(response.data.images[0], 'base64');
    fs.writeFileSync('output.png', imageBuffer);
}
```

### ComfyUI API

**Python：**

```python
import json
import urllib.parse
import urllib.request

SERVER = "server:8188"

def queue_prompt(workflow):
    """Queue a workflow for execution and return ComfyUI's JSON response."""
    data = json.dumps({"prompt": workflow}).encode('utf-8')
    req = urllib.request.Request(f"http://{SERVER}/prompt", data=data)
    return json.loads(urllib.request.urlopen(req).read())

def get_image(filename, subfolder, folder_type):
    """Download a generated image's raw bytes from the ComfyUI server."""
    params = urllib.parse.urlencode({
        "filename": filename,
        "subfolder": subfolder,
        "type": folder_type
    })
    with urllib.request.urlopen(f"http://{SERVER}/view?{params}") as response:
        return response.read()

# Load a workflow previously exported from the ComfyUI UI
with open('workflow.json') as f:
    workflow = json.load(f)

# Override the prompt text of node "6" (node ids are workflow-specific)
workflow["6"]["inputs"]["text"] = "A cat wearing a hat"

# Queue it and report the result
result = queue_prompt(workflow)
print(f"Queued: {result}")
```

**进度的 WebSocket：**

```python
import websocket
import json

SERVER = "server:8188"  # host:port of the ComfyUI instance

def on_message(ws, message):
    """Print progress/completion events pushed by ComfyUI over the websocket."""
    data = json.loads(message)
    if data['type'] == 'progress':
        print(f"Progress: {data['data']['value']}/{data['data']['max']}")
    elif data['type'] == 'executed':
        print("生成完成！")

ws = websocket.WebSocketApp(
    f"ws://{SERVER}/ws",
    on_message=on_message
)
ws.run_forever()  # blocks; run in a thread if used alongside other work
```

### FLUX 与 Diffusers

```python
import torch
from diffusers import FluxPipeline
import base64
from io import BytesIO

# Load the model once at process start — loading is the expensive step
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell",
    torch_dtype=torch.bfloat16
)
pipe.to("cuda")

def generate_image(prompt, height=1024, width=1024):
    """Generate one image for the prompt and return it as a PIL Image."""
    image = pipe(
        prompt,
        height=height,
        width=width,
        num_inference_steps=4,  # schnell variant is intended for few steps
        guidance_scale=0.0
    ).images[0]
    return image

# Simple API wrapper using Flask
from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/generate', methods=['POST'])
def generate():
    """POST {'prompt': ...} -> {'image': <base64-encoded PNG>}."""
    data = request.json
    image = generate_image(data['prompt'])

    # Convert to base64
    buffer = BytesIO()
    image.save(buffer, format='PNG')
    img_b64 = base64.b64encode(buffer.getvalue()).decode()

    return jsonify({'image': img_b64})

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
```

***

## 音频 API

### Whisper 转录

**使用 whisper-asr-webservice：**

```python
import requests

def transcribe(audio_path):
    """Upload an audio file to the ASR web service and return the transcript text."""
    with open(audio_path, 'rb') as f:
        response = requests.post(
            'http://server:9000/asr',
            files={'audio_file': f},
            data={
                'task': 'transcribe',
                'language': 'en',
                'output': 'json'
            }
        )
    return response.json()['text']

text = transcribe('audio.mp3')
print(text)
```

**直接的 Whisper API：**

```python
import os
import tempfile

import whisper
from flask import Flask, request, jsonify

# Load the model once at startup, not per request
model = whisper.load_model("large-v3")

app = Flask(__name__)

@app.route('/transcribe', methods=['POST'])
def transcribe():
    """Accept an uploaded 'audio' file and return its transcription as JSON."""
    audio = request.files['audio']
    # A unique temp file per request avoids concurrent uploads clobbering
    # each other (the original used a single fixed /tmp/audio.mp3 path).
    fd, path = tempfile.mkstemp(suffix='.mp3')
    os.close(fd)
    try:
        audio.save(path)
        result = model.transcribe(path)
    finally:
        os.remove(path)

    return jsonify({'text': result['text']})
```

### 文本转语音（Bark）

```python
from bark import SAMPLE_RATE, generate_audio, preload_models
from scipy.io.wavfile import write as write_wav
import base64
from io import BytesIO
from flask import Flask, request, jsonify

preload_models()  # download/load Bark models once at startup

app = Flask(__name__)

@app.route('/tts', methods=['POST'])
def text_to_speech():
    """Synthesize speech for request.json['text']; returns base64-encoded WAV."""
    text = request.json['text']
    audio = generate_audio(text)

    # Encode the WAV in memory: write_wav accepts a file-like object, so no
    # disk round-trip is needed, and a fixed /tmp path would race under
    # concurrent requests.
    buffer = BytesIO()
    write_wav(buffer, SAMPLE_RATE, audio)
    audio_b64 = base64.b64encode(buffer.getvalue()).decode()

    return jsonify({'audio': audio_b64})
```

***

## 构建应用

### 聊天应用

```python
from flask import Flask, request, jsonify, Response
from openai import OpenAI
import json

app = Flask(__name__)
client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy")

@app.route('/chat', methods=['POST'])
def chat():
    """Non-streaming chat: POST {'messages': [...]} -> {'response': text}."""
    messages = request.json.get('messages', [])

    response = client.chat.completions.create(
        model="meta-llama/Llama-3.1-8B-Instruct",
        messages=messages,
        temperature=0.7
    )

    return jsonify({
        'response': response.choices[0].message.content
    })

@app.route('/chat/stream', methods=['POST'])
def chat_stream():
    """Streaming chat over server-sent events; the stream ends with [DONE]."""
    messages = request.json.get('messages', [])

    def generate():
        # Relay each model token to the client as an SSE "data:" line
        stream = client.chat.completions.create(
            model="meta-llama/Llama-3.1-8B-Instruct",
            messages=messages,
            stream=True
        )
        for chunk in stream:
            if chunk.choices[0].delta.content:
                yield f"data: {json.dumps({'content': chunk.choices[0].delta.content})}\n\n"
        yield "data: [DONE]\n\n"

    return Response(generate(), mimetype='text/event-stream')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
```

### 图像生成服务

```python
from flask import Flask, request, jsonify, send_file
import requests
import base64
from io import BytesIO

app = Flask(__name__)
SD_API = "http://localhost:7860"

@app.route('/generate', methods=['POST'])
def generate():
    """Proxy a txt2img request to the SD WebUI; returns PNG bytes or base64."""
    data = request.json

    response = requests.post(f'{SD_API}/sdapi/v1/txt2img', json={
        'prompt': data['prompt'],
        'negative_prompt': data.get('negative_prompt', ''),
        'steps': data.get('steps', 20),
        'width': data.get('width', 512),
        'height': data.get('height', 512)
    })

    image_b64 = response.json()['images'][0]

    if data.get('return_base64'):
        return jsonify({'image': image_b64})

    # Return the decoded image as a file download
    image_data = base64.b64decode(image_b64)
    return send_file(BytesIO(image_data), mimetype='image/png')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
```

### 多模态管道

```python
from flask import Flask, request, jsonify
from openai import OpenAI
import requests
import base64

app = Flask(__name__)
llm_client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy")
SD_API = "http://localhost:7860"

@app.route('/create-image-from-description', methods=['POST'])
def create_image():
    """LLM-enhance a short description into a detailed prompt, then render it."""
    description = request.json['description']

    # Step 1: use the LLM to expand the description into a detailed prompt
    prompt_response = llm_client.chat.completions.create(
        model="meta-llama/Llama-3.1-8B-Instruct",
        messages=[{
            "role": "user",
            "content": f"Create a detailed image generation prompt for: {description}. Include style, lighting, and composition details. Return only the prompt, no explanation."
        }]
    )
    detailed_prompt = prompt_response.choices[0].message.content

    # Step 2: generate the image from the enhanced prompt
    image_response = requests.post(f'{SD_API}/sdapi/v1/txt2img', json={
        'prompt': detailed_prompt,
        'steps': 25,
        'width': 1024,
        'height': 1024
    })

    return jsonify({
        'prompt_used': detailed_prompt,
        'image': image_response.json()['images'][0]
    })

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
```

***

## 错误处理

```python
from openai import OpenAI, APIError, APIConnectionError
import time

client = OpenAI(base_url="http://server:8000/v1", api_key="dummy")

def chat_with_retry(messages, max_retries=3):
    """Send a chat completion, retrying connection failures with backoff.

    Re-raises APIConnectionError after max_retries failed attempts;
    other API errors are not retried and propagate immediately.
    """
    for attempt in range(max_retries):
        try:
            response = client.chat.completions.create(
                model="meta-llama/Llama-3.1-8B-Instruct",
                messages=messages,
                timeout=60
            )
            return response.choices[0].message.content

        except APIConnectionError as e:
            print(f"Connection error (attempt {attempt + 1}): {e}")
            if attempt < max_retries - 1:
                time.sleep(2 ** attempt)  # exponential backoff: 1s, 2s, 4s, ...
            else:
                raise

        except APIError as e:
            # Server-side errors are unlikely to be fixed by retrying
            print(f"API error: {e}")
            raise

# Usage
try:
    result = chat_with_retry([{"role": "user", "content": "Hello"}])
    print(result)
except Exception as e:
    print(f"Failed after retries: {e}")
```

***

## 最佳实践

1. **连接池** - 重用 HTTP 连接
2. **异步请求** - 对于并发调用使用 aiohttp
3. **超时** - 始终设置请求超时
4. **重试逻辑** - 处理临时故障
5. **速率限制** - 不要压垮服务器
6. **运行状况检查** - 监控服务器可用性

***

## 下一步

* [批量处理](https://docs.clore.ai/guides/guides_v2-zh/gao-ji/batch-processing) - 处理大型工作负载
* [多 GPU 设置](https://docs.clore.ai/guides/guides_v2-zh/gao-ji/multi-gpu-setup) - 扩展您的部署
* [LLM 比较](https://docs.clore.ai/guides/guides_v2-zh/dui-bi/llm-serving-comparison) - 选择合适的服务器
