{
    "version": "0.3.6",
    "description": "Get up and running with large language models locally.",
    "homepage": "https://ollama.com/",
    "license": "MIT",
    "url": "https://github.com/ollama/ollama/releases/download/v0.3.6/OllamaSetup.exe",
    "hash": "8b832b4ce023c48772cb6e23c118e59bb5df59e61685b31c8de7fc00f8c1b985",
    "innosetup": true,
    "bin": "ollama.exe",
    "shortcuts": [
        [
            "ollama app.exe",
            "Ollama",
            "",
            "app.ico"
        ]
    ],
    "checkver": {
        "github": "https://github.com/ollama/ollama"
    },
    "autoupdate": {
        "url": "https://github.com/ollama/ollama/releases/download/v$version/OllamaSetup.exe",
        "hash": {
            "url": "$baseurl/sha256sum.txt"
        }
    },
    "notes": "You should have at least 8 GB of RAM available to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models."
}
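
As a rough illustration of what the "hash" field above implies, the following minimal Python sketch (not part of the manifest) computes the SHA-256 of a locally downloaded OllamaSetup.exe and compares it to the pinned value; the installer path and the manual check itself are assumptions for the example, since Scoop performs this verification automatically at install time.

# Minimal sketch, assuming OllamaSetup.exe has already been downloaded to the
# current directory: compute its SHA-256 and compare it to the manifest's hash.
import hashlib

PINNED_HASH = "8b832b4ce023c48772cb6e23c118e59bb5df59e61685b31c8de7fc00f8c1b985"

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        # Read in 1 MiB chunks so large installers do not have to fit in memory.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

if __name__ == "__main__":
    actual = sha256_of("OllamaSetup.exe")
    print("OK" if actual == PINNED_HASH else f"MISMATCH: {actual}")

The "autoupdate" block works the same way for new releases: Scoop substitutes $version into the download URL and looks up the expected digest in the sha256sum.txt file published alongside each release.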