A Swift client library for interacting with the Ollama API.
Add the following to your Package.swift file:
.package(url: "https://github.com/mattt/ollama-swift.git", from: "1.2.0")
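For context, a minimal full manifest might look like the sketch below. The package and target names ("MyApp") and the product name ("Ollama") are illustrative assumptions, so adjust them to your project:

// swift-tools-version: 5.9
import PackageDescription

let package = Package(
    name: "MyApp", // placeholder package name
    platforms: [.macOS(.v13)], // adjust to the library's minimum requirements
    dependencies: [
        .package(url: "https://github.com/mattt/ollama-swift.git", from: "1.2.0")
    ],
    targets: [
        .executableTarget(
            name: "MyApp", // placeholder target name
            dependencies: [.product(name: "Ollama", package: "ollama-swift")]
        )
    ]
)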
import Ollama
// Use the default client (http://localhost:11434)
let client = Client.default
// Or create a custom client
let customClient = Client(host: URL(string: "http://your-ollama-host:11434")!, userAgent: "MyApp/1.0")
Generate text using a specified model:
do {
    let response = try await client.generate(
        model: "llama3.2",
        prompt: "Tell me a joke about Swift programming.",
        options: [
            "temperature": 0.7,
            "max_tokens": 100
        ]
    )
    print(response.response)
} catch {
    print("Error: \(error)")
}
Generate a chat completion:
do {
    let response = try await client.chat(
        model: "llama3.2",
        messages: [
            .system("You are a helpful assistant."),
            .user("In which city is Apple Inc. located?")
        ]
    )
    print(response.message.content)
} catch {
    print("Error: \(error)")
}
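To carry the conversation further, append the model's reply and a follow-up question to the message history before calling chat again. A sketch, assuming an .assistant message case analogous to .system and .user:

var conversation: [Chat.Message] = [
    .system("You are a helpful assistant."),
    .user("In which city is Apple Inc. located?")
]

let first = try await client.chat(model: "llama3.2", messages: conversation)

// Append the reply (.assistant is assumed by symmetry with .system/.user)
// plus a follow-up that depends on the earlier turn.
conversation.append(.assistant(first.message.content))
conversation.append(.user("And in which state is that city?"))

let second = try await client.chat(model: "llama3.2", messages: conversation)
print(second.message.content)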
You can request structured output from the model by specifying a format. Pass "json" to get back a JSON string, or provide a full JSON Schema:
// Simple JSON format
let response = try await client.chat(
    model: "llama3.2",
    messages: [.user("List 3 colors.")],
    format: "json"
)

// Using JSON schema for more control
let schema: Value = [
    "type": "object",
    "properties": [
        "colors": [
            "type": "array",
            "items": [
                "type": "object",
                "properties": [
                    "name": ["type": "string"],
                    "hex": ["type": "string"]
                ],
                "required": ["name", "hex"]
            ]
        ]
    ],
    "required": ["colors"]
]

let schemaResponse = try await client.chat(
    model: "llama3.2",
    messages: [.user("List 3 colors with their hex codes.")],
    format: schema
)
// The response will be a JSON object matching the schema:
// {
//   "colors": [
//     {"name": "papayawhip", "hex": "#FFEFD5"},
//     {"name": "indigo", "hex": "#4B0082"},
//     {"name": "navy", "hex": "#000080"}
//   ]
// }
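Since the reply arrives as a JSON string, you can decode it into matching Codable types with Foundation's JSONDecoder. A minimal sketch, assuming the JSON text is in schemaResponse.message.content:

import Foundation

struct ColorList: Codable {
    struct Color: Codable {
        let name: String
        let hex: String
    }
    let colors: [Color]
}

// Decode the structured reply into the Codable types above.
if let data = schemaResponse.message.content.data(using: .utf8) {
    let decoded = try JSONDecoder().decode(ColorList.self, from: data)
    for color in decoded.colors {
        print("\(color.name): \(color.hex)")
    }
}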
The format parameter works with both the chat and generate methods.
Ollama supports tool calling, which lets models perform complex tasks or interact with external services.
Note: tool support requires a compatible model, such as llama3.2.
Define a tool by specifying its name, description, parameters, and implementation:
struct WeatherInput: Codable {
    let city: String
}

struct WeatherOutput: Codable {
    let temperature: Double
    let conditions: String
}

let weatherTool = Tool<WeatherInput, WeatherOutput>(
    name: "get_current_weather",
    description: """
    Get the current weather for a city,
    with conditions ("sunny", "cloudy", etc.)
    and temperature in °C.
    """,
    parameters: [
        "type": "object",
        "properties": [
            "city": [
                "type": "string",
                "description": "The city to get weather for"
            ]
        ],
        "required": ["city"]
    ]
) { input async throws -> WeatherOutput in
    // Implement weather lookup logic here
    return WeatherOutput(temperature: 18.5, conditions: "cloudy")
}
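Because the implementation is just a closure, a defined tool can also be invoked directly, which is handy for testing it in isolation; the multi-turn example further below calls rgbToHexTool the same way. A short sketch:

// Call the tool's implementation directly, outside of any chat.
let weather = try await weatherTool(WeatherInput(city: "Portland"))
print("\(weather.conditions), \(weather.temperature)°C")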
Provide tools to the model during a chat:
let messages: [Chat.Message] = [
    .system("You are a helpful assistant that can check the weather."),
    .user("What's the weather like in Portland?")
]

let response = try await client.chat(
    model: "llama3.1",
    messages: messages,
    tools: [weatherTool]
)

// Handle tool calls in the response
if let toolCalls = response.message.toolCalls {
    for toolCall in toolCalls {
        print("Tool called: \(toolCall.function.name)")
        print("Arguments: \(toolCall.function.arguments)")
    }
}
Tools can be used across multiple turns of a conversation, where the model uses tool results to give more detailed responses:
// rgbToHexTool and its HexColorInput type are assumed to be defined
// like the weather tool above, converting RGB components to a hex string.
var messages: [Chat.Message] = [
    .system("You are a helpful assistant that can convert colors."),
    .user("What's the hex code for yellow?")
]

// First turn - model calls the tool
let response1 = try await client.chat(
    model: "llama3.1",
    messages: messages,
    tools: [rgbToHexTool]
)

enum ToolError: Error {
    case invalidParameters
}

// Add tool response to conversation
if let toolCall = response1.message.toolCalls?.first {
    // Parse the tool arguments
    guard let args = toolCall.function.arguments,
          let redStr = args["red"],
          let greenStr = args["green"],
          let blueStr = args["blue"],
          let red = Double(redStr, strict: false),
          let green = Double(greenStr, strict: false),
          let blue = Double(blueStr, strict: false)
    else {
        throw ToolError.invalidParameters
    }

    let input = HexColorInput(
        red: red,
        green: green,
        blue: blue
    )

    // Execute the tool with the input
    let hexColor = try await rgbToHexTool(input)

    // Add the tool result to the conversation
    messages.append(.tool(hexColor))
}

// Continue conversation with tool result
messages.append(.user("What other colors are similar?"))
let response2 = try await client.chat(
    model: "llama3.1",
    messages: messages,
    tools: [rgbToHexTool]
)
Generate embeddings for a given text:
do {
    let embeddings = try await client.createEmbeddings(
        model: "llama3.2",
        input: "Here is an article about llamas..."
    )
    print("Embeddings: \(embeddings)")
} catch {
    print("Error: \(error)")
}
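Embeddings are typically compared by cosine similarity. A small self-contained sketch in plain Swift, assuming each embedding has been extracted as a [Double]:

// Cosine similarity between two embedding vectors:
// dot(a, b) / (|a| * |b|), in the range [-1, 1].
func cosineSimilarity(_ a: [Double], _ b: [Double]) -> Double {
    precondition(a.count == b.count, "Vectors must have the same length")
    var dot = 0.0, normA = 0.0, normB = 0.0
    for i in 0..<a.count {
        dot += a[i] * b[i]
        normA += a[i] * a[i]
        normB += b[i] * b[i]
    }
    return dot / (normA.squareRoot() * normB.squareRoot())
}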
List available models:
do {
    let models = try await client.listModels()
    for model in models {
        print("Model: \(model.name), Modified: \(model.modifiedAt)")
    }
} catch {
    print("Error: \(error)")
}
Fetch detailed information about a specific model:
do {
    let modelInfo = try await client.showModel("llama3.2")
    print("Modelfile: \(modelInfo.modelfile)")
    print("Parameters: \(modelInfo.parameters)")
    print("Template: \(modelInfo.template)")
} catch {
    print("Error: \(error)")
}
Download a model from the Ollama library:
do {
    let success = try await client.pullModel("llama3.2")
    if success {
        print("Model successfully pulled")
    } else {
        print("Failed to pull model")
    }
} catch {
    print("Error: \(error)")
}
Upload a model to a model registry:

do {
    let success = try await client.pushModel("mynamespace/mymodel:latest")
    if success {
        print("Model successfully pushed")
    } else {
        print("Failed to push model")
    }
} catch {
    print("Error: \(error)")
}