diff --git a/examples/README.md b/examples/README.md
index 019d8f84..513857fa 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -1,6 +1,6 @@
 # Examples
 
-This directory contains examples that can be created and run with `ollama`.
+This directory contains different examples of using Ollama.
 
 To create a model:
 
diff --git a/examples/langchain-document/README.md b/examples/langchain-document/README.md
new file mode 100644
index 00000000..cd3b3491
--- /dev/null
+++ b/examples/langchain-document/README.md
@@ -0,0 +1,15 @@
+# LangChain Web Summarization
+
+This example summarizes a website.
+
+## Setup
+
+```
+pip install -r requirements.txt
+```
+
+## Run
+
+```
+python main.py
+```
diff --git a/examples/langchain-document/main.py b/examples/langchain-document/main.py
new file mode 100644
index 00000000..2bb25d75
--- /dev/null
+++ b/examples/langchain-document/main.py
@@ -0,0 +1,14 @@
+from langchain.llms import Ollama
+from langchain.document_loaders import WebBaseLoader
+from langchain.chains.summarize import load_summarize_chain
+
+# Load the blog post as a list of LangChain documents
+loader = WebBaseLoader("https://ollama.ai/blog/run-llama2-uncensored-locally")
+docs = loader.load()
+
+# Summarize the documents with the local llama2 model using the "stuff" chain
+llm = Ollama(model="llama2")
+chain = load_summarize_chain(llm, chain_type="stuff")
+
+result = chain.run(docs)
+print(result)
diff --git a/examples/langchain-document/requirements.txt b/examples/langchain-document/requirements.txt
new file mode 100644
index 00000000..09f75597
--- /dev/null
+++ b/examples/langchain-document/requirements.txt
@@ -0,0 +1,2 @@
+langchain==0.0.259
+bs4==0.0.1
\ No newline at end of file
diff --git a/examples/langchain-web-summary/README.md b/examples/langchain-web-summary/README.md
new file mode 100644
index 00000000..cd3b3491
--- /dev/null
+++ b/examples/langchain-web-summary/README.md
@@ -0,0 +1,15 @@
+# LangChain Web Summarization
+
+This example summarizes a website.
+
+## Setup
+
+```
+pip install -r requirements.txt
+```
+
+## Run
+
+```
+python main.py
+```
diff --git a/examples/langchain-web-summary/main.py b/examples/langchain-web-summary/main.py
new file mode 100644
index 00000000..2bb25d75
--- /dev/null
+++ b/examples/langchain-web-summary/main.py
@@ -0,0 +1,14 @@
+from langchain.llms import Ollama
+from langchain.document_loaders import WebBaseLoader
+from langchain.chains.summarize import load_summarize_chain
+
+# Load the blog post as a list of LangChain documents
+loader = WebBaseLoader("https://ollama.ai/blog/run-llama2-uncensored-locally")
+docs = loader.load()
+
+# Summarize the documents with the local llama2 model using the "stuff" chain
+llm = Ollama(model="llama2")
+chain = load_summarize_chain(llm, chain_type="stuff")
+
+result = chain.run(docs)
+print(result)
diff --git a/examples/langchain-web-summary/requirements.txt b/examples/langchain-web-summary/requirements.txt
new file mode 100644
index 00000000..09f75597
--- /dev/null
+++ b/examples/langchain-web-summary/requirements.txt
@@ -0,0 +1,2 @@
+langchain==0.0.259
+bs4==0.0.1
\ No newline at end of file
diff --git a/examples/langchain/README.md b/examples/langchain/README.md
new file mode 100644
index 00000000..0319a191
--- /dev/null
+++ b/examples/langchain/README.md
@@ -0,0 +1,15 @@
+# LangChain
+
+This example is a basic "hello world" of using LangChain with Ollama.
+
+## Setup
+
+```
+pip install -r requirements.txt
+```
+
+## Run
+
+```
+python main.py
+```
diff --git a/examples/langchain/main.py b/examples/langchain/main.py
new file mode 100644
index 00000000..620c13cb
--- /dev/null
+++ b/examples/langchain/main.py
@@ -0,0 +1,6 @@
+from langchain.llms import Ollama
+
+# Send a single prompt to the local llama2 model and print the response
+llm = Ollama(model="llama2")
+res = llm.predict("hi!")
+print(res)
diff --git a/examples/langchain/requirements.txt b/examples/langchain/requirements.txt
new file mode 100644
index 00000000..33cf51b3
--- /dev/null
+++ b/examples/langchain/requirements.txt
@@ -0,0 +1 @@
+langchain==0.0.259