From 8fc627bc26755ea8713a3934b1a86593ea449915 Mon Sep 17 00:00:00 2001
From: "Thomas A. Christensen II" <25492070+MillironX@users.noreply.github.com>
Date: Wed, 4 Sep 2024 16:02:16 -0500
Subject: [PATCH] refactor: Extract prompting and logging logic into a function

---
 main.jl | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/main.jl b/main.jl
index f90f49d..f00504d 100755
--- a/main.jl
+++ b/main.jl
@@ -28,6 +28,18 @@ end #for
 
 # We will be offloading the analysis to Ollama running Llama3.1 locally
 questions = [:Q8, :Q16, :Q29, :Q30]
+function logged_prompt(prompt)
+    @info "Prompting Llama3.1 with \n```\n$prompt\n```\n"
+    response = aigenerate(
+        PromptingTools.OllamaSchema(),
+        prompt;
+        model="llama3.1",
+        api_kwargs=(; options=(; num_gpu=99))
+    ).content
+    @info "Llama3.1 responded with \n```\n$response\n```\n"
+    return response
+end #function
+
 for q in questions
     analysis_prompt = """
         The following is a list of answers to a survey with one response per paragraph:
@@ -48,16 +60,8 @@ for q in questions
         Summarize the common themes between the survey responses.
         """
 
-    @info "Prompting Llama3.1 with \n```\n$analysis_prompt\n```\n"
+    analysis_response = logged_prompt(analysis_prompt)
 
-    analysis_response = aigenerate(
-        PromptingTools.OllamaSchema(),
-        analysis_prompt;
-        model="llama3.1",
-        api_kwargs=(; options=(; num_gpu=99))
-    ).content
-
-    @info "Llama3.1 responsed with \n```\n$analysis_response\n```\n"
 end #for
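
A minimal standalone sketch of how the extracted helper can be exercised
outside the survey loop. It assumes PromptingTools.jl is installed and an
Ollama server is running locally with the llama3.1 model pulled; the example
prompt string, the `summary` variable, and the `println` call are
illustrative only and not part of this patch:

    using PromptingTools

    # Same helper as introduced in main.jl above: log the prompt, query the
    # local Ollama llama3.1 model, log the reply, and return its text.
    function logged_prompt(prompt)
        @info "Prompting Llama3.1 with \n```\n$prompt\n```\n"
        response = aigenerate(
            PromptingTools.OllamaSchema(),
            prompt;
            model="llama3.1",
            # Ollama option: offload up to 99 model layers to the GPU
            api_kwargs=(; options=(; num_gpu=99))
        ).content
        @info "Llama3.1 responded with \n```\n$response\n```\n"
        return response
    end #function

    # Illustrative one-off call, independent of the questions loop
    summary = logged_prompt("Summarize the common themes between these responses:\n\nGreat course.\n\nReally enjoyed the course.")
    println(summary)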