diff --git a/examples/.gitignore b/examples/.gitignore index ad2c3ce5..b60652b6 100644 --- a/examples/.gitignore +++ b/examples/.gitignore @@ -1,7 +1,10 @@ node_modules +bun.lockb +.vscode # OSX .DS_STORE + # Models models/ diff --git a/examples/langchain-python-rag-websummary/README.md b/examples/langchain-python-rag-websummary/README.md index cd3b3491..9ccc54cc 100644 --- a/examples/langchain-python-rag-websummary/README.md +++ b/examples/langchain-python-rag-websummary/README.md @@ -1,15 +1,23 @@ # LangChain Web Summarization -This example summarizes a website +This example summarizes the website, [https://ollama.ai/blog/run-llama2-uncensored-locally](https://ollama.ai/blog/run-llama2-uncensored-locally) -## Setup +## Running the Example -``` -pip install -r requirements.txt -``` +1. Ensure you have the `llama2` model installed: -## Run + ```bash + ollama pull llama2 + ``` -``` -python main.py -``` +2. Install the Python Requirements. + + ```bash + pip install -r requirements.txt + ``` + +3. Run the example: + + ```bash + python main.py + ``` diff --git a/examples/langchain-python-rag-websummary/requirements.txt b/examples/langchain-python-rag-websummary/requirements.txt index 09f75597..33cf51b3 100644 --- a/examples/langchain-python-rag-websummary/requirements.txt +++ b/examples/langchain-python-rag-websummary/requirements.txt @@ -1,2 +1 @@ langchain==0.0.259 -bs4==0.0.1 \ No newline at end of file diff --git a/examples/langchain-python-simple/README.md b/examples/langchain-python-simple/README.md index 7fa84a3a..3f401ca8 100644 --- a/examples/langchain-python-simple/README.md +++ b/examples/langchain-python-simple/README.md @@ -2,20 +2,23 @@ This example is a basic "hello world" of using LangChain with Ollama. -## Setup +## Running the Example -``` -pip install -r requirements.txt -``` +1. Ensure you have the `llama2` model installed: -## Run + ```bash + ollama pull llama2 + ``` -``` -python main.py -``` +2. Install the Python Requirements. -Running this example will print the response for "hello": + ```bash + pip install -r requirements.txt + ``` -``` -Hello! It's nice to meet you. hopefully you are having a great day! Is there something I can help you with or would you like to chat? -``` +3. Run the example: + + ```bash + python main.py + ``` + \ No newline at end of file diff --git a/examples/langchain-python-simple/main.py b/examples/langchain-python-simple/main.py index c8cde83b..da696e00 100644 --- a/examples/langchain-python-simple/main.py +++ b/examples/langchain-python-simple/main.py @@ -1,4 +1,6 @@ from langchain.llms import Ollama + +input = input("What is your question?") llm = Ollama(model="llama2") -res = llm.predict("hello") +res = llm.predict(input) print (res) diff --git a/examples/langchain-typescript-simple/README.md b/examples/langchain-typescript-simple/README.md index 80d025b7..7c65ccfa 100644 --- a/examples/langchain-typescript-simple/README.md +++ b/examples/langchain-typescript-simple/README.md @@ -2,20 +2,22 @@ This example is a basic "hello world" of using LangChain with Ollama using Node.js and Typescript. -## Setup +## Running the Example -```shell -npm install -``` +1. Install the prerequisites: -## Run + ```bash + npm install + ``` -```shell -ts-node main.ts -``` +2. Ensure the `mistral` model is available: -Running this example will print the response for "hello": + ```bash + ollama pull mistral + ``` -```plaintext -Hello! It's nice to meet you. hopefully you are having a great day! Is there something I can help you with or would you like to chat? 
-```
+3. Run the example:
+
+   ```bash
+   npm start
+   ```
diff --git a/examples/langchain-typescript-simple/main.ts b/examples/langchain-typescript-simple/main.ts
index cd77f178..53a58371 100644
--- a/examples/langchain-typescript-simple/main.ts
+++ b/examples/langchain-typescript-simple/main.ts
@@ -1,15 +1,25 @@
-import { Ollama} from 'langchain/llms/ollama';
+import { Ollama } from 'langchain/llms/ollama';
+import * as readline from "readline";
 
 async function main() {
   const ollama = new Ollama({
     model: 'mistral'
     // other parameters can be found at https://js.langchain.com/docs/api/llms_ollama/classes/Ollama
-  })
-  const stream = await ollama.stream("Hello");
+  });
 
-  for await (const chunk of stream) {
-    process.stdout.write(chunk);
-  }
+  const rl = readline.createInterface({
+    input: process.stdin,
+    output: process.stdout,
+  });
+
+  rl.question("What is your question: \n", async (user_input) => {
+    const stream = await ollama.stream(user_input);
+
+    for await (const chunk of stream) {
+      process.stdout.write(chunk);
+    }
+    rl.close();
+  })
 }
 
 main();
\ No newline at end of file
diff --git a/examples/langchain-typescript-simple/package-lock.json b/examples/langchain-typescript-simple/package-lock.json
index 443b79ec..90587d20 100644
--- a/examples/langchain-typescript-simple/package-lock.json
+++ b/examples/langchain-typescript-simple/package-lock.json
@@ -1,5 +1,5 @@
 {
-  "name": "with-langchain-typescript-simplegenerate",
+  "name": "langchain-typescript-simple",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
diff --git a/examples/langchain-typescript-simple/package.json b/examples/langchain-typescript-simple/package.json
index 33035cd6..5d6a5b88 100644
--- a/examples/langchain-typescript-simple/package.json
+++ b/examples/langchain-typescript-simple/package.json
@@ -1,8 +1,13 @@
 {
+  "scripts": {
+    "start": "tsx main.ts"
+  },
   "devDependencies": {
-    "typescript": "^5.2.2"
+    "tsx": "^4.6.2",
+    "typescript": "^5.3.3"
   },
   "dependencies": {
-    "langchain": "^0.0.165"
+    "langchain": "^0.0.165",
+    "readline": "^1.3.0"
   }
 }
diff --git a/examples/modelfile-tweetwriter/readme.md b/examples/modelfile-tweetwriter/readme.md
new file mode 100644
index 00000000..51111259
--- /dev/null
+++ b/examples/modelfile-tweetwriter/readme.md
@@ -0,0 +1,23 @@
+# Example Modelfile - Tweetwriter
+
+This simple example shows what you can do without any code, simply relying on a Modelfile. The file has two instructions:
+
+1. FROM - The FROM instruction defines the parent model to use for this one. If you choose a model from the library, you can enter just the model name. For all other models, you need to specify the namespace as well. You could also use a local file. Just include the relative path to the converted, quantized model weights file. To learn more about creating that file, see the `import.md` file in the docs folder of this repository.
+2. SYSTEM - This defines the system prompt for the model and overrides the system prompt from the parent model.
+
+## Running the Example
+
+1. Create the model:
+
+   ```bash
+   ollama create tweetwriter
+   ```
+
+2. Enter a topic to generate a tweet about.
+3. Show the Modelfile in the REPL:
+
+   ```bash
+   /show modelfile
+   ```
+
+   Notice that the FROM and SYSTEM match what was in the file. But there is also a TEMPLATE and PARAMETER. These are inherited from the parent model.
\ No newline at end of file diff --git a/examples/python-dockerit/README.md b/examples/python-dockerit/README.md index 4c200b5e..2ba00ce2 100644 --- a/examples/python-dockerit/README.md +++ b/examples/python-dockerit/README.md @@ -1,15 +1,31 @@ # DockerIt -DockerIt is a tool to help you build and run your application in a Docker container. It consists of a model that defines the system prompt and model weights to use, along with a python script to then build the container and run the image automatically. +DockerIt is a tool to help you build and run your application in a Docker container. It consists of a model that defines the system prompt and model weights to use, along with a python script to then build the container and run the image automatically. + +## Running the Example + +1. Ensure you have the `mattw/dockerit` model installed: + + ```bash + ollama pull mattw/dockerit + ``` + +2. Make sure Docker is running on your machine. + +3. Install the Python Requirements. + + ```bash + pip install -r requirements.txt + ``` + +4. Run the example: + + ```bash + python dockerit.py "simple postgres server with admin password set to 123" + ``` + +5. Enter the name you would like to use for your container image. ## Caveats -This is an simple example. It's assuming the Dockerfile content generated is going to work. In many cases, even with simple web servers, it fails when trying to copy files that don't exist. It's simply an example of what you could possibly do. - -## Example Usage - -```bash -> python3 ./dockerit.py "simple postgres server with admin password set to 123" -Enter the name of the image: matttest -Container named happy_keller started with id: 7c201bb6c30f02b356ddbc8e2a5af9d7d7d7b8c228519c9a501d15c0bd9d6b3e -``` +This is a simple example. It's assuming the Dockerfile content generated is going to work. In many cases, even with simple web servers, it fails when trying to copy files that don't exist. It's simply an example of what you could possibly do. diff --git a/examples/python-json-datagenerator/readme.md b/examples/python-json-datagenerator/readme.md index 2dc958e7..ec5701be 100644 --- a/examples/python-json-datagenerator/readme.md +++ b/examples/python-json-datagenerator/readme.md @@ -4,6 +4,32 @@ There are two python scripts in this example. `randomaddresses.py` generates random addresses from different countries. `predefinedschema.py` sets a template for the model to fill in. +## Running the Example + +1. Ensure you have the `llama2` model installed: + + ```bash + ollama pull llama2 + ``` + +2. Install the Python Requirements. + + ```bash + pip install -r requirements.txt + ``` + +3. Run the Random Addresses example: + + ```bash + python randomaddresses.py + ``` + +4. Run the Predefined Schema example: + + ```bash + python predefinedschema.py + ``` + ## Review the Code Both programs are basically the same, with a different prompt for each, demonstrating two different ideas. The key part of getting JSON out of a model is to state in the prompt or system prompt that it should respond using JSON, and specifying the `format` as `json` in the data body. 
diff --git a/examples/python-loganalysis/loganalysis.py b/examples/python-loganalysis/loganalysis.py index 2b7ddd48..4c7eccbd 100644 --- a/examples/python-loganalysis/loganalysis.py +++ b/examples/python-loganalysis/loganalysis.py @@ -16,12 +16,12 @@ def find_errors_in_log_file(): with open(log_file_path, 'r') as log_file: log_lines = log_file.readlines() -error_logs = [] - for i, line in enumerate(log_lines): - if "error" in line.lower(): - start_index = max(0, i - prelines) - end_index = min(len(log_lines), i + postlines + 1) - error_logs.extend(log_lines[start_index:end_index]) + error_logs = [] + for i, line in enumerate(log_lines): + if "error" in line.lower(): + start_index = max(0, i - prelines) + end_index = min(len(log_lines), i + postlines + 1) + error_logs.extend(log_lines[start_index:end_index]) return error_logs @@ -32,7 +32,6 @@ data = { "model": "mattw/loganalyzer" } - response = requests.post("http://localhost:11434/api/generate", json=data, stream=True) for line in response.iter_lines(): if line: diff --git a/examples/python-loganalysis/readme.md b/examples/python-loganalysis/readme.md index fbfb89a1..828e8de2 100644 --- a/examples/python-loganalysis/readme.md +++ b/examples/python-loganalysis/readme.md @@ -2,12 +2,34 @@ ![loganalyzer 2023-11-10 08_53_29](https://github.com/jmorganca/ollama/assets/633681/ad30f1fc-321f-4953-8914-e30e24db9921) -This example shows one possible way to create a log file analyzer. To use it, run: +This example shows one possible way to create a log file analyzer. It uses the model **mattw/loganalyzer** which is based on **codebooga**, a 34b parameter model. + +To use it, run: `python loganalysis.py ` You can try this with the `logtest.logfile` file included in this directory. +## Running the Example + +1. Ensure you have the `mattw/loganalyzer` model installed: + + ```bash + ollama pull mattw/loganalyzer + ``` + +2. Install the Python Requirements. + + ```bash + pip install -r requirements.txt + ``` + +3. Run the example: + + ```bash + python loganalysis.py logtest.logfile + ``` + ## Review the code The first part of this example is a Modelfile that takes `codebooga` and applies a new System Prompt: @@ -45,4 +67,4 @@ for line in response.iter_lines(): There is a lot more that can be done here. This is a simple way to detect errors, looking for the word error. Perhaps it would be interesting to find anomalous activity in the logs. It could be interesting to create embeddings for each line and compare them, looking for similar lines. Or look into applying Levenshtein Distance algorithms to find similar lines to help identify the anomalous lines. -Also try different models and different prompts to analyze the data. You could consider adding retrieval augmented generation (RAG) to this to help understand newer log formats. +Try different models and different prompts to analyze the data. You could consider adding retrieval augmented generation (RAG) to this to help understand newer log formats. diff --git a/examples/python-rag-newssummary/README.md b/examples/python-rag-newssummary/README.md index fbaabba4..51a68be1 100644 --- a/examples/python-rag-newssummary/README.md +++ b/examples/python-rag-newssummary/README.md @@ -14,9 +14,22 @@ This example goes through a series of steps: This example lets you pick from a few different topic areas, then summarize the most recent x articles for that topic. It then creates chunks of sentences from each article and then generates embeddings for each of those chunks. 
-You can run the example like this: +## Running the Example -```bash -pip install -r requirements.txt -python summ.py -``` +1. Ensure you have the `mistral-openorca` model installed: + + ```bash + ollama pull mistral-openorca + ``` + +2. Install the Python Requirements. + + ```bash + pip install -r requirements.txt + ``` + +3. Run the example: + + ```bash + python summ.py + ``` diff --git a/examples/python-simplechat/client.py b/examples/python-simplechat/client.py index 3c480f97..768a2289 100644 --- a/examples/python-simplechat/client.py +++ b/examples/python-simplechat/client.py @@ -24,7 +24,6 @@ def chat(messages): # the response streams one token at a time, print that as we receive it print(content, end="", flush=True) - if body.get("done", False): message["content"] = output return message @@ -32,9 +31,11 @@ def chat(messages): def main(): messages = [] - + while True: user_input = input("Enter a prompt: ") + if not user_input: + exit() print() messages.append({"role": "user", "content": user_input}) message = chat(messages) diff --git a/examples/python-simplechat/readme.md b/examples/python-simplechat/readme.md index abbdfe7e..204a8159 100644 --- a/examples/python-simplechat/readme.md +++ b/examples/python-simplechat/readme.md @@ -1,6 +1,26 @@ # Simple Chat Example -The **chat** endpoint is one of two ways to generate text from an LLM with Ollama. At a high level you provide the endpoint an array of objects with a role and content specified. Then with each output and prompt, you add more of those role/content objects, which builds up the history. +The **chat** endpoint is one of two ways to generate text from an LLM with Ollama, and is introduced in version 0.1.14. At a high level, you provide the endpoint an array of objects with a role and content specified. Then with each output and prompt, you add more of those role/content objects, which builds up the history. + +## Running the Example + +1. Ensure you have the `llama2` model installed: + + ```bash + ollama pull llama2 + ``` + +2. Install the Python Requirements. + + ```bash + pip install -r requirements.txt + ``` + +3. Run the example: + + ```bash + python client.py + ``` ## Review the Code diff --git a/examples/python-simplechat/requirements.txt b/examples/python-simplechat/requirements.txt new file mode 100644 index 00000000..9688b8ec --- /dev/null +++ b/examples/python-simplechat/requirements.txt @@ -0,0 +1 @@ +Requests==2.31.0 diff --git a/examples/python-simplegenerate/README.md b/examples/python-simplegenerate/README.md new file mode 100644 index 00000000..a9175207 --- /dev/null +++ b/examples/python-simplegenerate/README.md @@ -0,0 +1,29 @@ +# Simple Generate Example + +This is a simple example using the **Generate** endpoint. + +## Running the Example + +1. Ensure you have the `stablelm-zephyr` model installed: + + ```bash + ollama pull stablelm-zephyr + ``` + +2. Install the Python Requirements. + + ```bash + pip install -r requirements.txt + ``` + +3. Run the example: + + ```bash + python client.py + ``` + +## Review the Code + +The **main** function simply asks for input, then passes that to the generate function. The output from generate is then passed back to generate on the next run. + +The **generate** function uses `requests.post` to call `/api/generate`, passing the model, prompt, and context. The `generate` endpoint returns a stream of JSON blobs that are then iterated through, looking for the response values. That is then printed out. 
The final JSON object includes the full context of the conversation so far, and that is the return value from the function.
diff --git a/examples/python-simplegenerate/client.py b/examples/python-simplegenerate/client.py
index 9bd0d035..7b5cf810 100644
--- a/examples/python-simplegenerate/client.py
+++ b/examples/python-simplegenerate/client.py
@@ -2,7 +2,7 @@ import json
 import requests
 
 # NOTE: ollama must be running for this to work, start the ollama app or run `ollama serve`
-model = 'llama2' # TODO: update this for whatever model you wish to use
+model = 'stablelm-zephyr' # TODO: update this for whatever model you wish to use
 
 def generate(prompt, context):
     r = requests.post('http://localhost:11434/api/generate',
@@ -30,6 +30,8 @@ def main():
     context = [] # the context stores a conversation history, you can use this to make the model more context aware
     while True:
         user_input = input("Enter a prompt: ")
+        if not user_input:
+            exit()
         print()
         context = generate(user_input, context)
         print()
diff --git a/examples/python-simplegenerate/requirements.txt b/examples/python-simplegenerate/requirements.txt
new file mode 100644
index 00000000..9688b8ec
--- /dev/null
+++ b/examples/python-simplegenerate/requirements.txt
@@ -0,0 +1 @@
+Requests==2.31.0
diff --git a/examples/typescript-mentors/README.md b/examples/typescript-mentors/README.md
index 5b8349e8..5ab1cc55 100644
--- a/examples/typescript-mentors/README.md
+++ b/examples/typescript-mentors/README.md
@@ -4,18 +4,62 @@ This example demonstrates how one would create a set of 'mentors' you can have a
 
 ## Usage
 
-```bash
-ts-node ./character-generator.ts "Lorne Greene"
-```
+1. Add llama2 to have the mentors answer your questions:
 
-This will create `lornegreene/Modelfile`. Now you can create a model with this command:
+   ```bash
+   ollama pull llama2
+   ```
 
-```bash
-ollama create lornegreene -f lornegreene/Modelfile
-```
+2. Install prerequisites:
 
-If you want to add your own mentors, you will have to update the code to look at your namespace instead of **mattw**. Also set the list of mentors to include yours.
+   ```bash
+   npm install
+   ```
 
-```bash
-ts-node ./mentors.ts "What is a Jackalope?"
-```
+3. Ask a question:
+
+   ```bash
+   npm start "what is a jackalope"
+   ```
+
+You can also add your own character to be chosen at random when you ask a question.
+
+1. Make sure you have the right model installed:
+
+   ```bash
+   ollama pull stablebeluga2:70b-q4_K_M
+   ```
+
+2. Create a new character:
+
+   ```bash
+   npm run charactergen "Lorne Greene"
+   ```
+
+   You can choose any well-known person you like. This example will create `lornegreene/Modelfile`.
+
+3. Now you can create a model with this command:
+
+   ```bash
+   ollama create <YourNamespace>/lornegreene -f lornegreene/Modelfile
+   ```
+
+   `YourNamespace` is whatever name you set up when you signed up at [https://ollama.ai/signup](https://ollama.ai/signup).
+
+4. To add this to your mentors, you will have to update the code as follows. On line 8 of `mentors.ts`, add an object to the array, replacing `<YourNamespace>` with the namespace you used above.
+
+   ```bash
+   {ns: "<YourNamespace>", char: "Lorne Greene"}
+   ```
+
+## Review the Code
+
+There are two scripts you can run in this example. The first is the main script to ask the mentors a question. The other one lets you generate a character to add to the mentors. Both scripts are mostly about adjusting the prompts at each inference stage.
+
+### mentors.ts
+
+In the **main** function, it starts by generating a list of mentors. This chooses 3 from a list of interesting characters. Then we ask for a question, and then things get interesting. We set the prompt for each of the 3 mentors a little differently, and the 2nd and 3rd mentors see what the previous mentors said. The other functions in `mentors.ts` set the prompts for each mentor.
+
+### character-generator.ts
+
+**Character Generator** simply customizes the prompt to build a character profile for any famous person. Most of the script is just tweaking the prompt. It uses Stable Beluga 2, a 70b parameter model. The 70b models tend to do better writing a bio about a character than smaller models, and Stable Beluga seemed to do better than Llama 2. Since this is used at development time for the characters, it doesn't affect the runtime of asking the mentors for their input.
diff --git a/examples/typescript-mentors/mentors.ts b/examples/typescript-mentors/mentors.ts
index 3c05b846..17d70476 100644
--- a/examples/typescript-mentors/mentors.ts
+++ b/examples/typescript-mentors/mentors.ts
@@ -2,10 +2,11 @@ import { Ollama } from 'ollama-node';
 
 const mentorCount = 3;
 const ollama = new Ollama();
+type Mentor = { ns: string, char: string };
 
-function getMentors(): string[] {
-  const mentors = ['Gary Vaynerchuk', 'Kanye West', 'Martha Stewart', 'Neil deGrasse Tyson', 'Owen Wilson', 'Ronald Reagan', 'Donald Trump', 'Barack Obama', 'Jeff Bezos'];
-  const chosenMentors: string[] = [];
+function getMentors(): Mentor[] {
+  const mentors = [{ ns: 'mattw', char: 'Gary Vaynerchuk' }, { ns: 'mattw', char: 'Kanye West'}, {ns: 'mattw', char: 'Martha Stewart'}, {ns: 'mattw', char: 'Neil deGrasse Tyson'}, {ns: 'mattw', char: 'Owen Wilson'}, {ns: 'mattw', char: 'Ronald Reagan'}, {ns: 'mattw', char: 'Donald Trump'}, {ns: 'mattw', char: 'Barack Obama'}, {ns: 'mattw', char: 'Jeff Bezos'}];
+  const chosenMentors: Mentor[] = [];
   for (let i = 0; i < mentorCount; i++) {
     const mentor = mentors[Math.floor(Math.random() * mentors.length)];
     chosenMentors.push(mentor);
@@ -14,12 +15,12 @@ function getMentors(): string[] {
   return chosenMentors;
 }
 
-function getMentorFileName(mentor: string): string {
-  const model = mentor.toLowerCase().replace(/\s/g, '');
-  return `mattw/${model}`;
+function getMentorFileName(mentor: Mentor): string {
+  const model = mentor.char.toLowerCase().replace(/\s/g, '');
+  return `${mentor.ns}/${model}`;
 }
 
-async function getSystemPrompt(mentor: string, isLast: boolean, question: string): Promise<string> {
+async function getSystemPrompt(mentor: Mentor, isLast: boolean, question: string): Promise<string> {
   ollama.setModel(getMentorFileName(mentor));
   const info = await ollama.showModelInfo()
   let SystemPrompt = info.system || '';
@@ -43,8 +44,8 @@ async function main() {
     ollama.setModel(getMentorFileName(mentor));
     ollama.setSystemPrompt(SystemPrompt);
     let output = '';
-    process.stdout.write(`\n${mentor}: `);
-    for await (const chunk of ollama.streamingGenerate(theConversation + `Continue the conversation as if you were ${mentor} on the question "${question}".`)) {
+    process.stdout.write(`\n${mentor.char}: `);
+    for await (const chunk of ollama.streamingGenerate(theConversation + `Continue the conversation as if you were ${mentor.char} on the question "${question}".`)) {
       if (chunk.response) {
         output += chunk.response;
         process.stdout.write(chunk.response);
@@ -52,7 +53,7 @@ async function main() {
         process.stdout.write('\n');
       }
     }
-    theConversation += `${mentor}: ${output}\n\n`
+    theConversation += `${mentor.char}: ${output}\n\n`
   }
 }
 
diff --git a/examples/typescript-mentors/package.json b/examples/typescript-mentors/package.json
index d4e37562..537f3df1 100644
--- a/examples/typescript-mentors/package.json
+++ b/examples/typescript-mentors/package.json
@@ -1,7 +1,15 @@
 {
+  "scripts": {
+    "charactergen": "tsx character-generator.ts",
+    "start": "tsx mentors.ts"
+  },
   "dependencies": {
     "fs": "^0.0.1-security",
     "ollama-node": "^0.0.3",
     "path": "^0.12.7"
+  },
+  "devDependencies": {
+    "tsx": "^4.6.2",
+    "typescript": "^5.3.3"
   }
 }
diff --git a/examples/typescript-simplechat/package.json b/examples/typescript-simplechat/package.json
index 4ee1647d..6ae8c1aa 100644
--- a/examples/typescript-simplechat/package.json
+++ b/examples/typescript-simplechat/package.json
@@ -1 +1,12 @@
-{ "dependencies": { "@types/node": "^20.10.4", "prompt-sync": "^4.2.0", "readline": "^1.3.0" } }
\ No newline at end of file
+{
+  "scripts": {
+    "start": "tsx client.ts"
+  },
+  "dependencies": {
+    "@types/node": "^20.10.4",
+    "prompt-sync": "^4.2.0",
+    "readline": "^1.3.0",
+    "tsx": "^4.6.2",
+    "typescript": "^5.3.3"
+  }
+ }
\ No newline at end of file
diff --git a/examples/typescript-simplechat/readme.md b/examples/typescript-simplechat/readme.md
index ccd4aaf6..5635b9d2 100644
--- a/examples/typescript-simplechat/readme.md
+++ b/examples/typescript-simplechat/readme.md
@@ -1,14 +1,10 @@
 # Simple Chat Example
 
-The **chat** endpoint is one of two ways to generate text from an LLM with Ollama. At a high level you provide the endpoint an array of message objects with a role and content specified. Then with each output and prompt, you add more messages, which builds up the history.
+The **chat** endpoint, available as of v0.1.14, is one of two ways to generate text from an LLM with Ollama. At a high level, you provide the endpoint an array of message objects with a role and content specified. Then with each output and prompt, you add more messages, which builds up the history.
 
 ## Run the Example
 
-There are a few ways to run this, just like any Typescript code:
-
-1. Compile with `tsc` and then run it with `node client.js`.
-2. Install `tsx` and run it with `tsx client.ts`.
-3. Install `bun` and run it with `bun client.ts`.
+`npm start`
 
 ## Review the Code
 
@@ -30,7 +26,7 @@ With the **generate** endpoint, you need to provide a `prompt`. But with **chat**,
 
 The final JSON object doesn't provide the full content, so you will need to build the content yourself. In this example, **chat** takes the full array of messages and outputs the resulting message from this call of the chat endpoint.
 
-In the **askQuestion** function, we collect `user_input` and add it as a message to our messages and that is passed to the chat function. When the LLM is done responding the output is added as another message to the messages array.
+In the **askQuestion** function, we collect `user_input` and add it as a message to our messages, and that is passed to the chat function. When the LLM is done responding, the output is added as another message to the messages array.
 
 At the end, you will see a printout of all the messages.