From b45cb209d1acb4c0168fb119cc41b05c31e6774b Mon Sep 17 00:00:00 2001
From: Roman Inflianskas
Date: Fri, 10 May 2024 09:48:49 +0300
Subject: Improve prompt for card generation

Previously, cards generated with the llama3 model were broken because
llama3 was too chatty and provided both the front and back sides when
asked for a question. Update the prompt to make generation correct.
---
 src/ollama.tsx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/ollama.tsx b/src/ollama.tsx
index 2c70685..fb1c7fa 100644
--- a/src/ollama.tsx
+++ b/src/ollama.tsx
@@ -279,7 +279,7 @@ export async function convertToFlashCard(uuid: string, blockContent: string) {
   try {
     const questionBlock = await logseq.Editor.insertBlock(uuid, "⌛Genearting question....", { before: false })
     const answerBlock = await logseq.Editor.insertBlock(questionBlock!.uuid, "⌛Genearting answer....", { before: false })
-    const question = await promptLLM(`Create a question about this that would fit in a flashcard:\n ${blockContent}`)
+    const question = await promptLLM(`Create a question for a flashcard. Provide the question only. Here is the knowledge to check:\n ${blockContent}`)
     const answer = await promptLLM(`Given the question ${question} and the context of ${blockContent} What is the answer? be as brief as possible and provide the answer only.`)
     await logseq.Editor.updateBlock(questionBlock!.uuid, `${question} #card`)
     await delay(300)
--
cgit v1.2.3
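
A minimal sketch of the two-step prompting this patch ends up with, assuming a local Ollama server on its default port. The promptLLM body below is illustrative rather than the plugin's actual implementation, and generateFlashCard is a hypothetical stand-in for the Logseq block-editing logic inside convertToFlashCard:

async function promptLLM(prompt: string): Promise<string> {
  // Ollama's /api/generate endpoint; stream: false returns one JSON object.
  const res = await fetch("http://localhost:11434/api/generate", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ model: "llama3", prompt, stream: false }),
  })
  if (!res.ok) throw new Error(`Ollama request failed: ${res.status}`)
  const data = await res.json()
  return data.response.trim()
}

async function generateFlashCard(blockContent: string) {
  // "Provide the question only" keeps chatty models like llama3 from
  // emitting both sides of the card in a single reply.
  const question = await promptLLM(
    `Create a question for a flashcard. Provide the question only. Here is the knowledge to check:\n ${blockContent}`
  )
  const answer = await promptLLM(
    `Given the question ${question} and the context of ${blockContent} What is the answer? be as brief as possible and provide the answer only.`
  )
  return { question, answer }
}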