Calling a local Ollama model from Python for translation

import requests
import json

# Ollama's local HTTP endpoint for one-shot generation
url = "http://localhost:11434/api/generate"

# Request payload
data = {
    "model": "llama3.1",  # use the llama3.1 model
    "prompt": "请翻译为英文:这是一个翻译的测试",
    "format": "json",  # ask the API to constrain the output to valid JSON
    "stream": False    # return one complete response object instead of a stream
}

# Send the POST request
response = requests.post(url, json=data)

# Check the HTTP status code
if response.status_code == 200:
    # Extract the JSON body of the reply
    result = response.json()

    # Make sure the "response" key exists in result and is non-empty
    if "response" in result and result["response"]:
        try:
            # The generated text is itself a JSON string; parse it
            chat_response = json.loads(result["response"])

            # Pretty-print the parsed response
            print("Chat Response:")
            print(json.dumps(chat_response, indent=4, ensure_ascii=False))
        except json.JSONDecodeError as e:
            print(f"Error parsing JSON response: {e}")
            print("Raw Response:")
            print(result["response"])
    else:
        print("No chat response found in the result.")
else:
    print("Failed to generate response. Status code:", response.status_code)
    print("Response:", response.text)

Output

(X:\CondaEnvs\vscode-python312) PS E:\Microsoft VS Code\py> & X:/CondaEnvs/vscode-python312/python.exe "e:/Microsoft VS Code/py/test-ollama.py"
Chat Response:
{
    "This is a translation test": ""
}
(X:\CondaEnvs\vscode-python312) PS E:\Microsoft VS Code\py> 
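The translation did come back, but in an odd shape: the English text landed in a JSON key with an empty value. That is a side effect of "format": "json", which forces the model to emit valid JSON but says nothing about its structure. One way to pin the shape down is to describe it in the prompt; the "translation" key below is an arbitrary choice, not something Ollama defines, and the model is not guaranteed to follow it.

data = {
    "model": "llama3.1",
    "prompt": (
        "请翻译为英文:这是一个翻译的测试\n"
        'Reply as JSON in the form {"translation": "..."}'
    ),
    "format": "json",  # still constrains the output to valid JSON
    "stream": False
}
# With a prompt like this, the result can usually be read as:
#     json.loads(result["response"])["translation"]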
