1
0
mirror of https://github.com/Jermolene/TiddlyWiki5 synced 2025-02-08 23:20:03 +00:00

Refactor completion servers so that they handle their own response

This commit is contained in:
Jeremy Ruston 2024-07-21 16:51:19 +01:00
parent fb641d340c
commit 370ff3057e
3 changed files with 80 additions and 43 deletions

View File

@ -14,49 +14,26 @@ statusTitle - Optional title of a tiddler to which the status of the request wil
completionServer - Optional URL of server completionServer - Optional URL of server
--> -->
\procedure get-llm-completion(conversationTitle,resultTitlePrefix,resultTags,statusTitle,completionServer) \procedure get-llm-completion(conversationTitle,resultTitlePrefix,resultTags,statusTitle,completionServer)
<!--
Callback for the HTTP response from the LLM
-->
\procedure get-llm-completion-callback()
<!-- Invoked as the oncompletion handler of the tm-http-request message sent below;
the <<status>>, <<statusText>> and <<data>> variables carry the HTTP response,
while <<resultTitlePrefix>> and <<resultTags>> are passed through via the
var-* attributes of the request -->
<%if [<status>compare:number:gteq[200]compare:number:lteq[299]] %>
<!-- Success: any 2xx status — save the model's reply as a new markdown tiddler -->
<$action-createtiddler
$basetitle=<<resultTitlePrefix>>
tags=<<resultTags>>
type="text/markdown"
role={{{ [<data>jsonget[choices],[0],[message],[role]] }}}
text={{{ [<data>jsonget[choices],[0],[message],[content]] }}}
/>
<%else%>
<!-- Error: non-2xx status — record the status text and the server's error message
(read from the response JSON; presumably an OpenAI-style error object — TODO confirm) -->
<$action-createtiddler
$basetitle=<<resultTitlePrefix>>
tags=<<resultTags>>
type="text/markdown"
role="error"
text={{{ [[Error:]] [<statusText>] [<data>jsonget[error],[message]] +[join[]] }}}
/>
<%endif%>
\end get-llm-completion-callback
<$let <$let
completionServer={{{ [<completionServer>!is[blank]else<default-llm-completion-server>] }}} completionServer={{{ [<completionServer>!is[blank]else<default-llm-completion-server>] }}}
> >
<$wikify name="json" text={{{ [<completionServer>get[text]] }}}> <$importvariables filter="[<completionServer>]">
<$action-log message="get-llm-completion"/> <$wikify name="json" text=<<json-prompt>>>
<$action-log/> <$action-log message="get-llm-completion"/>
<$action-sendmessage <$action-log/>
$message="tm-http-request" <$action-sendmessage
url={{{ [<completionServer>get[url]] }}} $message="tm-http-request"
body=<<json>> url={{{ [<completionServer>get[url]] }}}
header-content-type="application/json" body=<<json>>
bearer-auth-token-from-store="openai-secret-key" header-content-type="application/json"
method="POST" bearer-auth-token-from-store="openai-secret-key"
oncompletion=<<get-llm-completion-callback>> method="POST"
bind-status=<<statusTitle>> oncompletion=<<completion-callback>>
var-resultTitlePrefix=<<resultTitlePrefix>> bind-status=<<statusTitle>>
var-resultTags=<<resultTags>> var-resultTitlePrefix=<<resultTitlePrefix>>
/> var-resultTags=<<resultTags>>
</$wikify> />
</$wikify>
</$importvariables>
</$let> </$let>
\end get-llm-completion \end get-llm-completion

View File

@ -3,6 +3,10 @@ tags: $:/tags/AI/CompletionServer
url: http://127.0.0.1:8080/v1/chat/completions url: http://127.0.0.1:8080/v1/chat/completions
caption: Locally running Llamafile server caption: Locally running Llamafile server
<!--
Wikified JSON text to be sent to server
-->
\procedure json-prompt()
\rules only filteredtranscludeinline transcludeinline macrodef macrocallinline html conditional commentblock commentinline \rules only filteredtranscludeinline transcludeinline macrodef macrocallinline html conditional commentblock commentinline
{ {
"model": "gpt-4o", "model": "gpt-4o",
@ -21,4 +25,30 @@ caption: Locally running Llamafile server
} }
</$list> </$list>
] ]
} }
\end json-prompt
<!--
Callback for the HTTP response from the LLM
-->
\procedure completion-callback()
<!-- HTTP completion handler supplied by this server tiddler and invoked via
oncompletion=<<completion-callback>> in get-llm-completion; <<status>>,
<<statusText>> and <<data>> come from the HTTP response, while
<<resultTitlePrefix>> and <<resultTags>> are forwarded request variables -->
<%if [<status>compare:number:gteq[200]compare:number:lteq[299]] %>
<!-- Success: any 2xx status — store the first choice's message as a markdown tiddler -->
<$action-createtiddler
$basetitle=<<resultTitlePrefix>>
tags=<<resultTags>>
type="text/markdown"
role={{{ [<data>jsonget[choices],[0],[message],[role]] }}}
text={{{ [<data>jsonget[choices],[0],[message],[content]] }}}
/>
<%else%>
<!-- Error: non-2xx status — concatenate the status text with the error message
extracted from the response JSON (assumes an OpenAI-compatible error payload) -->
<$action-createtiddler
$basetitle=<<resultTitlePrefix>>
tags=<<resultTags>>
type="text/markdown"
role="error"
text={{{ [[Error:]] [<statusText>] [<data>jsonget[error],[message]] +[join[]] }}}
/>
<%endif%>
\end completion-callback

View File

@ -3,6 +3,10 @@ tags: $:/tags/AI/CompletionServer
url: https://api.openai.com/v1/chat/completions url: https://api.openai.com/v1/chat/completions
caption: OpenAI Service caption: OpenAI Service
<!--
Wikified JSON text to be sent to server
-->
\procedure json-prompt()
\rules only filteredtranscludeinline transcludeinline macrodef macrocallinline html conditional commentblock commentinline \rules only filteredtranscludeinline transcludeinline macrodef macrocallinline html conditional commentblock commentinline
{ {
"model": "gpt-4o", "model": "gpt-4o",
@ -36,4 +40,30 @@ caption: OpenAI Service
} }
</$list> </$list>
] ]
} }
\end json-prompt
<!--
Callback for the HTTP response from the LLM
-->
\procedure completion-callback()
<!-- HTTP completion handler supplied by this server tiddler and invoked via
oncompletion=<<completion-callback>> in get-llm-completion; <<status>>,
<<statusText>> and <<data>> come from the HTTP response, while
<<resultTitlePrefix>> and <<resultTags>> are forwarded request variables -->
<%if [<status>compare:number:gteq[200]compare:number:lteq[299]] %>
<!-- Success: any 2xx status — store the first choice's message as a markdown tiddler -->
<$action-createtiddler
$basetitle=<<resultTitlePrefix>>
tags=<<resultTags>>
type="text/markdown"
role={{{ [<data>jsonget[choices],[0],[message],[role]] }}}
text={{{ [<data>jsonget[choices],[0],[message],[content]] }}}
/>
<%else%>
<!-- Error: non-2xx status — concatenate the status text with the error message
extracted from the response JSON (OpenAI error payloads nest it under error.message) -->
<$action-createtiddler
$basetitle=<<resultTitlePrefix>>
tags=<<resultTags>>
type="text/markdown"
role="error"
text={{{ [[Error:]] [<statusText>] [<data>jsonget[error],[message]] +[join[]] }}}
/>
<%endif%>
\end completion-callback