| author | Roman Inflianskas <rominf@pm.me> | 2024-05-10 09:48:49 +0300 |
|---|---|---|
| committer | Roman Inflianskas <rominf@pm.me> | 2024-05-10 10:26:16 +0300 |
| commit | b45cb209d1acb4c0168fb119cc41b05c31e6774b (patch) | |
| tree | 830f11f3a1c96ec3b0f75c32a963bb41b7543e31 /src/ollama.tsx | |
| parent | 2ef25a3b0ed6992a3ff4dcd06898196f927e0899 (diff) | |
| download | ollama-logseq-b45cb209d1acb4c0168fb119cc41b05c31e6774b.tar.xz ollama-logseq-b45cb209d1acb4c0168fb119cc41b05c31e6774b.zip | |
Improve prompt for card generation
Previously, cards generated with the llama3 model came out broken: llama3
was too chatty and returned both the front and back sides when asked only
for a question. Update the prompt so the model produces the question alone.
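For context, here is a minimal sketch of a promptLLM-style helper against Ollama's /api/generate endpoint. The real promptLLM in src/ollama.tsx is not part of this diff, so the host URL, model name, and error handling below are illustrative assumptions.

```typescript
// Minimal sketch, assuming Ollama's /api/generate HTTP API.
// The actual promptLLM in src/ollama.tsx is not shown in this diff;
// the host, model, and error handling here are assumptions.
async function promptLLM(prompt: string): Promise<string> {
  const res = await fetch("http://localhost:11434/api/generate", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ model: "llama3", prompt, stream: false }),
  })
  if (!res.ok) throw new Error(`Ollama request failed: ${res.status}`)
  // With stream: false, Ollama returns the whole completion in `response`.
  const data = (await res.json()) as { response: string }
  return data.response.trim()
}
```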
Diffstat (limited to 'src/ollama.tsx')
| -rw-r--r-- | src/ollama.tsx | 2 |
1 file changed, 1 insertion, 1 deletion
```diff
diff --git a/src/ollama.tsx b/src/ollama.tsx
index 2c70685..fb1c7fa 100644
--- a/src/ollama.tsx
+++ b/src/ollama.tsx
@@ -279,7 +279,7 @@ export async function convertToFlashCard(uuid: string, blockContent: string) {
   try {
     const questionBlock = await logseq.Editor.insertBlock(uuid, "⌛Genearting question....", { before: false })
     const answerBlock = await logseq.Editor.insertBlock(questionBlock!.uuid, "⌛Genearting answer....", { before: false })
-    const question = await promptLLM(`Create a question about this that would fit in a flashcard:\n ${blockContent}`)
+    const question = await promptLLM(`Create a question for a flashcard. Provide the question only. Here is the knowledge to check:\n ${blockContent}`)
     const answer = await promptLLM(`Given the question ${question} and the context of ${blockContent} What is the answer? be as brief as possible and provide the answer only.`)
     await logseq.Editor.updateBlock(questionBlock!.uuid, `${question} #card`)
     await delay(300)
```
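The fix is purely prompt-level: the new wording states the output constraint ("Provide the question only.") up front, mirroring the answer prompt's "provide the answer only", rather than relying on the model to infer the expected shape. A complementary guard, not part of this commit, would be to normalize chatty completions in code as well; a hypothetical sketch:

```typescript
// Hypothetical post-processing guard (not in this commit): if a chatty
// model still returns something like "Question: ...\nAnswer: ...",
// keep only the first non-empty line and strip a "Question:" prefix.
function questionOnly(completion: string): string {
  const line =
    completion.split("\n").find((l) => l.trim().length > 0) ?? completion
  return line.replace(/^Question:\s*/i, "").trim()
}

// e.g.: const question = questionOnly(await promptLLM(...))
```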
