Skip to content

Commit 08b8116

Browse files
committed
Web server now has separate Generate and Describe routes
1 parent b843e1b commit 08b8116

File tree

4 files changed

+38
-13
lines changed

4 files changed

+38
-13
lines changed

gpt-workflow-csharp/gpt-workflow-csharp-cli/Builder/DotBuilder.cs

+1-1
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ public string Build()
3333
sb.AppendLine("digraph G {");
3434

3535
foreach(var node in nodes)
36-
sb.AppendLine($" {GetNodeAsDot(node)}[shape={GetShape(node)}, label=\"{node.Identifier}\" ]");
36+
sb.AppendLine($" {GetNodeAsDot(node)}[shape={GetShape(node)}, label=\"{node.Identifier}\" ];");
3737

3838
foreach(var edge in edges)
3939
{

main_web_service.py

+25-9
Original file line numberDiff line numberDiff line change
@@ -1,15 +1,13 @@
11
from http.server import BaseHTTPRequestHandler, HTTPServer
2-
from random import randint
32
from urllib.parse import urlparse, parse_qs
3+
import urllib
44
import threading, socket
55

6-
from prompts_dot_graph_creator import EXPERT_COMMANDS
6+
from prompts_dot_graph_creator import EXPERT_COMMANDS, GetPromptToDescribeWorkflow, getExpertCommandToCreateDot, getExpertCommandToDescribeDot
77
import core
88
import config
99
import config_web
1010

11-
chain = core.create_command_messages(EXPERT_COMMANDS)
12-
1311
# Python web server with cookie-based session
1412
# based on https://davidgorski.ca/posts/sessions/
1513

@@ -21,7 +19,8 @@
2119
class MyServer(BaseHTTPRequestHandler):
2220
def do_GET(self):
2321
routes = {
24-
"/": self.bot
22+
"/generate-dot": self.bot_generate_dot,
23+
"/describe-dot": self.bot_describe_dot,
2524
}
2625
try:
2726
response = 200
@@ -40,13 +39,23 @@ def do_GET(self):
4039
self.write(content)
4140
return
4241

43-
def bot(self):
42+
def bot_describe_dot(self):
    """Handle GET /describe-dot: ask the LLM to describe, in natural language,
    the DOT-notation workflow supplied in the 'p' query parameter.

    Returns the raw response from core.execute_prompt.
    """
    global prompt_id
    raw_dot = self.parse_query_param("p")
    # Wrap the user's DOT text in the describe-workflow prompt template.
    wrapped_prompt = GetPromptToDescribeWorkflow(raw_dot)
    # Route only to the 'describe' expert for this endpoint.
    describe_messages = core.create_command_messages([getExpertCommandToDescribeDot()])
    response = core.execute_prompt(wrapped_prompt, previous_messages, describe_messages, prompt_id)
    prompt_id += 1
    return response
4950

51+
def bot_generate_dot(self):
    """Handle GET /generate-dot: generate a DOT-notation workflow from the
    user's natural-language prompt supplied in the 'p' query parameter.

    Returns the human-readable output followed by the generated DOT text,
    separated by a divider.
    """
    global prompt_id
    user_request = self.parse_query_param("p")
    # Route only to the 'create' expert for this endpoint.
    create_messages = core.create_command_messages([getExpertCommandToCreateDot()])
    result = core.execute_prompt(user_request, previous_messages, create_messages, prompt_id)
    prompt_id += 1
    divider = "\n\n======\n\n"
    return result["human_output"] + divider + result["dot"]
58+
5059
def parse_path(self):
5160
return urlparse(self.path).path
5261

@@ -76,6 +85,9 @@ def start_single_threaded():
7685
webServer.server_close()
7786
print("Server stopped.")
7887

88+
def quotify(text):
    """Percent-encode *text* so it is safe to embed as a URL query value."""
    encoded = urllib.parse.quote(text)
    return encoded
90+
7991
def start_multi_threaded():
8092
# Multi-threaded server, else performance is terrible
8193
# ref https://stackoverflow.com/questions/46210672/python-2-7-streaming-http-server-supporting-multiple-connections-on-one-port
@@ -108,9 +120,13 @@ def run(self):
108120
pass
109121
httpd.server_close()
110122

123+
escaped_create_dot = quotify("Create a flow that makes a series of decisions about whether to approve a mortgage application")
124+
escaped_dot = quotify('digraph G { Start_Start_1[shape=ellipse, label="Start_1" ]; Decision_Experience[shape=Mdiamond, label="Experience" ]; Decision_Education[shape=Mdiamond, label="Education" ]; Decision_Skills[shape=Mdiamond, label="Skills" ]; End_Recommend[shape=rectangle, label="Recommend" ]; End_Not_Recommend[shape=rectangle, label="Not_Recommend" ]; Start_Start_1 -> Decision_Experience; Decision_Experience -> Decision_Education [label="true"]; Decision_Experience -> End_Not_Recommend [label="false"]; Decision_Education -> Decision_Skills [label="true"]; Decision_Education -> End_Not_Recommend [label="false"]; Decision_Skills -> End_Recommend [label="true"]; Decision_Skills -> End_Not_Recommend [label="false"];}")}')
125+
111126
print(f"Server started at http://{config_web.HOSTNAME}:{config_web.PORT} - {config_web.WEB_SERVER_THREADS} threads")
112127
print("Please set the 'p' query parameter to be the user's prompt.")
113-
print("example: http://localhost:8083/?p=I%20need%20a%20make%20a%20Car%20Parts%20application")
128+
print(f"- generate DOT example: http://localhost:8083/generate-dot?p={escaped_create_dot}")
129+
print(f"- describe DOT example: http://localhost:8083/describe-dot?p={escaped_dot}");
114130
print("[press any key to stop]")
115131
[Thread(i) for i in range(config_web.WEB_SERVER_THREADS)]
116132
input("Press ENTER to kill server")

prompts_dot_graph_creator.py

+8-2
Original file line numberDiff line numberDiff line change
@@ -45,12 +45,18 @@
4545
- use clear natural language, in a concise, friendly tone.
4646
"""
4747

48+
def getExpertCommandToCreateDot():
    """Build the expert Command that answers requests to create a workflow in DOT notation."""
    return Command('create_dot_workflow', create_dot_flowchart__expert_template, "Good for answering questions about creating a workflow in DOT notation")


def getExpertCommandToDescribeDot():
    """Build the expert Command that summarizes a workflow given in DOT notation."""
    return Command('describe_dot_workflow', describe_dot_flowchart__expert_template, "Good for describing a workflow given in DOT notation, summarizing its activity and its general purpose")


# Each expert is a prompt that knows how to handle one type of user input.
EXPERT_COMMANDS = [
    getExpertCommandToDescribeDot(),
    # The 'create' expert is deliberately placed last, so that its IMPORTANT
    # message about the whitelist is not ignored (LLMs tend to overlook
    # content in the middle of a prompt). An approach like LangChain's
    # MULTI_PROMPT_ROUTER_TEMPLATE would avoid this problem.
    getExpertCommandToCreateDot(),
]
5561

5662
def GetPromptToDescribeWorkflow(dotText):

service_dot_parser.py

+4-1
Original file line numberDiff line numberDiff line change
@@ -38,4 +38,7 @@ def parse_dot_and_return_human(rsp, prompt_id):
3838
write_dot_to_file(dot_string, prompt_id)
3939
generate_png_from_dot(dot_string, prompt_id)
4040

41-
return human_output
41+
return {
42+
"human_output": human_output,
43+
"dot": dot_string
44+
}

0 commit comments

Comments
 (0)