From 123e0537eecf343dd19dfa9fe638d5509a042086 Mon Sep 17 00:00:00 2001
From: "Thomas A. Christensen II" <25492070+MillironX@users.noreply.github.com>
Date: Wed, 4 Sep 2024 15:58:48 -0500
Subject: [PATCH] refactor: Prompt Llama3 via PromptingTools

---
 main.jl | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/main.jl b/main.jl
index a8886de..f90f49d 100755
--- a/main.jl
+++ b/main.jl
@@ -28,8 +28,6 @@ end #for
 # We will be offloading the analysis to Ollama running Llama3.1 locally
 questions = [:Q8, :Q16, :Q29, :Q30]
 
-run(`ollama serve`)
-
 for q in questions
     analysis_prompt = """
         The following is a list of answers to a survey with one response per paragraph:
@@ -52,7 +50,12 @@ for q in questions
 
     @info "Prompting Llama3.1 with \n```\n$analysis_prompt\n```\n"
 
-    analysis_response = read(run(`ollama run llama3.1 $analysis_prompt`), String)
+    analysis_response = aigenerate(
+        PromptingTools.OllamaSchema(),
+        analysis_prompt;
+        model="llama3.1",
+        api_kwargs=(; options=(; num_gpu=99))
+    ).content
 
     @info "Llama3.1 responsed with \n```\n$analysis_response\n```\n"
 end #for
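
Note for reviewers: a minimal standalone sketch of the new call path introduced by this patch. It assumes PromptingTools.jl is installed, that an Ollama server is already running locally (the patch drops the `run(`ollama serve`)` call, so the server must be started separately), and that the llama3.1 model has been pulled. The prompt string below is a placeholder, not the actual survey prompt built in main.jl.

```julia
using PromptingTools

# Placeholder prompt standing in for the survey-analysis prompt built in main.jl
analysis_prompt = "Summarize the common themes in the following survey answers: ..."

# aigenerate() sends the prompt to the local Ollama server and returns a message
# object; num_gpu=99 asks Ollama to offload as many model layers as possible
# to the GPU
msg = aigenerate(
    PromptingTools.OllamaSchema(),
    analysis_prompt;
    model = "llama3.1",
    api_kwargs = (; options = (; num_gpu = 99)),
)

println(msg.content)  # the generated text lives in the .content field
```

Compared with shelling out via `run(`ollama run llama3.1 ...`)`, routing through PromptingTools keeps the response in-process as a string and exposes Ollama runtime options such as `num_gpu` directly through `api_kwargs`.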