python调用本地llama模型的chat

import requests  # 确保这一行在脚本的开头
import json

# URL of the local Ollama "generate" API endpoint.
url = "http://localhost:11434/api/generate"

# Request payload: ask llama3.1 for a one-shot, JSON-formatted answer.
data = {
    "model": "llama3.1",  # use the llama3.1 model
    "prompt": "What color is the sky at different times of the day? Respond using JSON",
    "format": "json",  # ask the server to constrain output to valid JSON
    "stream": False    # return the whole answer in a single response body
}

try:
    # requests has NO default timeout — without one, a hung or unreachable
    # server would block this script forever. 120s allows for slow local
    # model loading on first request.
    response = requests.post(url, json=data, timeout=120)
except requests.exceptions.RequestException as exc:
    # Connection refused / DNS failure / timeout — report instead of a raw traceback.
    print("Request to the local model server failed:", exc)
else:
    # Check the HTTP status code before trying to parse the body.
    if response.status_code == 200:
        # Extract the chat response envelope.
        result = response.json()

        if 'response' in result and result['response']:
            try:
                # The model's answer is itself a JSON string — parse it.
                chat_response = json.loads(result['response'])
            except json.JSONDecodeError:
                # Despite "format": "json", the model can occasionally emit
                # malformed JSON; show the raw text rather than crashing.
                print("Model returned invalid JSON:")
                print(result['response'])
            else:
                # Pretty-print the parsed chat response.
                print("Chat Response:")
                print(json.dumps(chat_response, indent=4, ensure_ascii=False))
        else:
            print("No chat response found in the result.")
    else:
        print("Failed to generate response. Status code:", response.status_code)
        print("Response:", response.text)

### 输出示例

Chat Response:
{
    "skyColor": {
        "6am": "Deep Blue (almost black)",
        "sunrise": "Light Blue with hues of Pink and Orange",
        "morning": "Medium Blue (with a slight tint of yellow)",
        "midday": "Bright Blue",
        "afternoon": "Pale Blue with hints of White (due to increased cloud cover or dust in the air)",
        "sunset": "Vibrant Orange, Red, and Pink hues",
        "evening": "Soft Pink, Purple, and Blue shades"
    }
}

留下评论

您的邮箱地址不会被公开。 必填项已用 * 标注