Ollama AI Chat
Ollama API Documentation
Endpoint: GET /api/tags
Description: Returns a list of available models
{
"models": [
{
"name": "llama2",
"modified_at": "2024-01-01T12:00:00Z",
"size": 4563416
}
]
}
Endpoint: POST /api/generate
Description: Generate a response from the model
{
"model": "llama2",
"prompt": "What is artificial intelligence?",
"stream": true,
"images": ["base64_encoded_image"] // Optional
}
Response (Stream):
{
"model": "llama2",
"created_at": "2024-01-01T12:00:00Z",
"response": "Artificial intelligence...",
"done": false
}
Endpoint: GET /api/show
Description: Get details about a specific model
{
"license": "MIT",
"modelfile": "FROM llama2\nPARAMETER temperature 0.7",
"parameters": "temperature 0.7",
"template": "{{ .Prompt }}",
"system": "You are a helpful AI assistant."
}
Endpoint: POST /api/create
Description: Create a new model from a Modelfile
{
"name": "custom_model",
"modelfile": "FROM llama2\nSYSTEM You are a helpful assistant",
"stream": true
}
Endpoint: POST /api/copy
Description: Create a copy of a model
{
"source": "llama2",
"destination": "llama2-copy"
}
Endpoint: DELETE /api/delete
Description: Delete a model
{
"name": "model_name"
}
Endpoint: POST /api/pull
Description: Download a model from a registry
{
"name": "llama2",
"stream": true
}
Endpoint: POST /api/push
Description: Upload a model to a registry
{
"name": "username/model:latest",
"stream": true
}
Endpoint: POST /api/embeddings
Description: Generate embeddings from a model
{
"model": "llama2",
"prompt": "Here is some text to generate embeddings for"
}
Response:
{
"embedding": [0.1, 0.2, 0.3, ...]
}