-rw-r--r--  README.md       3
-rw-r--r--  package.json    2
-rw-r--r--  src/ollama.tsx  14
3 files changed, 7 insertions, 12 deletions
diff --git a/README.md b/README.md
index 7fab0d2..0e4cf0f 100644
--- a/README.md
+++ b/README.md
@@ -9,13 +9,14 @@ A plugin to integrate [ollama](https://github.com/jmorganca/ollama) with [logseq
> Note: If you are on Windows, make sure WSL is running in the background for the model to work properly
# Features
-- The plugin currently has 5 commands
+- The plugin currently has 6 commands
- Ask Ai -> prompts the AI freely, without any context
- Ask Ai with context -> the same as Ask Ai, but it gives the model the context of the current page
- Summarize -> Summarizes the whole page
- Summarize Block
- Create a flash card
- Divide a todo task into subtasks
+- Get model from block properties (see the sketch below)
- Respects theming
- Context menu commands
- Summarize Block
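
The new "Get model from block properties" command lets a block override the globally configured model. As a sketch of how that lookup could work with the `@logseq/libs` API, assuming a hypothetical `ollama-generate-model::` property (the actual property name is not shown in this diff):

```ts
import "@logseq/libs"

// Hypothetical sketch, not the plugin's actual implementation: read a
// per-block model override and fall back to the global `model` setting.
// Logseq camelCases property keys, so "ollama-generate-model::" appears
// as `ollamaGenerateModel` in `block.properties`.
async function getModelForBlock(uuid: string): Promise<string | undefined> {
  const block = await logseq.Editor.getBlock(uuid)
  const override = block?.properties?.ollamaGenerateModel as string | undefined
  return override ?? (logseq.settings?.model as string | undefined)
}
```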
diff --git a/package.json b/package.json
index ae103fd..93d0d6e 100644
--- a/package.json
+++ b/package.json
@@ -1,6 +1,6 @@
{
"name": "ollama-logseq",
- "version": "1.0.7",
+ "version": "1.0.8",
"main": "dist/index.html",
"scripts": {
"dev": "vite",
diff --git a/src/ollama.tsx b/src/ollama.tsx
index a59b03b..38cb0f6 100644
--- a/src/ollama.tsx
+++ b/src/ollama.tsx
@@ -73,7 +73,6 @@ type OllamaGenerateParameters = {
}
async function ollamaGenerate(prompt: string, parameters?: OllamaGenerateParameters) {
-
if (!logseq.settings) {
throw new Error("Couldn't find ollama-logseq settings")
}
@@ -85,8 +84,6 @@ async function ollamaGenerate(prompt: string, parameters?: OllamaGenerateParamet
params.prompt = prompt
params.stream = false
- console.log(params)
-
try {
const response = await fetch(`http://${logseq.settings.host}/api/generate`, {
method: 'POST',
@@ -96,16 +93,14 @@ async function ollamaGenerate(prompt: string, parameters?: OllamaGenerateParamet
body: JSON.stringify(params)
})
if (!response.ok) {
- console.log("Error in Ollama request: " + response.statusText)
- logseq.UI.showMsg("Error in Ollama request")
+ logseq.UI.showMsg("Couldn't fulfill request. Make sure the ollama service is running and that there is no typo in the host or model name")
throw new Error("Error in Ollama request: " + response.statusText)
}
const data = await response.json()
-
return data
} catch (e: any) {
- console.log(e)
- logseq.UI.showMsg("Error in Ollama request")
+ console.error("ERROR: ", e)
+ logseq.UI.showMsg("Couldn't fulfill request. Make sure the ollama service is running and that there is no typo in the host or model name")
}
}
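
For reference, here is a standalone sketch of the request `ollamaGenerate` sends, runnable outside Logseq; `localhost:11434` (Ollama's default port) and `llama2` are placeholder values for the plugin's `host` and `model` settings. With `stream: false`, `/api/generate` answers with a single JSON object whose `response` field holds the generated text:

```ts
// Standalone sketch of the non-streaming /api/generate call.
async function generateOnce(prompt: string): Promise<string> {
  const response = await fetch("http://localhost:11434/api/generate", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ model: "llama2", prompt, stream: false }),
  })
  if (!response.ok) {
    throw new Error("Error in Ollama request: " + response.statusText)
  }
  const data = await response.json()
  return data.response // the generated text
}
```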
@@ -126,9 +121,8 @@ async function promptLLM(prompt: string) {
}),
})
if (!response.ok) {
- console.log("Error: couldn't fulfill request")
logseq.App.showMsg("Couldn't fulfill request. Make sure the ollama service is running and that there is no typo in the host or model name")
- throw new Error('Network response was not ok');
+ throw new Error("Error in Ollama request: " + response.statusText)
}
const data = await response.json();
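
Since `ollamaGenerate` catches its own errors, shows the UI message, and falls through to an `undefined` return, a call site only has to bail out on a missing result. A hypothetical caller (`runSummarize` is illustrative, not from this diff):

```ts
// Hypothetical call site: the user-facing message has already been shown
// by ollamaGenerate, so an empty result just aborts the command quietly.
async function runSummarize(prompt: string): Promise<string | undefined> {
  const data = await ollamaGenerate(prompt)
  if (!data) return undefined
  return data.response
}
```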