diff --git a/README.md b/README.md index 986ce09f..28836d0c 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,15 @@ Run ai models locally. pip install ollama ``` +## Install From Source + +``` +git clone git@github.com:jmorganca/ollama ollama +cd ollama +pip install -r requirements.txt +pip install -e . + +## Quickstart ``` diff --git a/docs/development.md b/docs/development.md index 7cc00870..6703cfdf 100644 --- a/docs/development.md +++ b/docs/development.md @@ -1,48 +1,33 @@ # Development -ollama is built and run using [Poetry](https://python-poetry.org/). - -## Running - -**Start backend service:** - -Install dependencies: +ollama is built using Python 3 and uses [Poetry](https://python-poetry.org/) to manage dependencies and build packages. ``` -poetry install --extras server +pip install poetry ``` -Run a server: +Install ollama and its dependencies: ``` -poetry run ollama serve +poetry install --extras server --with dev ``` -## Building +Run ollama server: -If using Apple silicon, you need a Python version that supports arm64: - -```bash -wget https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-MacOSX-arm64.sh -bash Miniforge3-MacOSX-arm64.sh +``` +poetry run ollama server ``` -Get the dependencies: +Update dependencies: -```bash -poetry install --extras server +``` +poetry update --with dev +poetry lock +poetry export >requirements.txt ``` -Then build a binary for your current platform: +Build binary package: -```bash +``` poetry build ``` - -## Update requirements.txt - -In the root directory, run: - -``` -pipreqs . --force -``` diff --git a/pyproject.toml b/pyproject.toml index 18464c15..5371132f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ name = "ollama" version = "0.0.2" description = "Run ai models locally" -authors = ["Ollama team"] +authors = ["ollama team"] readme = "README.md" packages = [{include = "ollama"}] scripts = {ollama = "ollama.cmd.cli:main"}