Skip to content

Commit 0121be4

Browse files
committed
Initial working version - outputs DOT format in response
1 parent 419809e commit 0121be4

15 files changed

+642
-1
lines changed

README.md

+259-1
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,260 @@
11
# gpt-workflow
2-
Generate workflows (for flowcharts or low code) via LLM
2+
Generate workflows (for flowcharts or low code) via LLM.
3+
4+
## Approach: generate DOT notation as a simple format to represent a workflow
5+
6+
The DOT graph format (as used by tools like graphviz) is a simple way to represent a flow chart.
7+
The DOT script generated by the LLM can be further processed, for example by generating a flow chart image OR by populating some kind of workflow system.
8+
9+
# Example Execution
10+
11+
```
12+
[[[TEST Simple workflow to decide the animal type]]]
13+
---
14+
>> Create a flow that makes a series of decisions about whether to approve a mortgage application
15+
digraph G {
16+
17+
// start
18+
start [shape=ellipse, label="Start"];
19+
20+
// decision_credit_score
21+
start -> decision_credit_score;
22+
decision_credit_score [shape=Mdiamond, label="Credit Score > 700?"];
23+
24+
// decision_income
25+
decision_credit_score -> decision_income;
26+
decision_income [shape=Mdiamond, label="Income > $50,000?"];
27+
28+
// decision_employment
29+
decision_income -> decision_employment;
30+
decision_employment [shape=Mdiamond, label="Employment > 2 years?"];
31+
32+
// decision_down_payment
33+
decision_employment -> decision_down_payment;
34+
decision_down_payment [shape=Mdiamond, label="Down Payment > 20%?"];
35+
36+
// approve
37+
decision_down_payment -> approve;
38+
approve [shape=box, label="Approve"];
39+
40+
// reject
41+
decision_credit_score -> reject;
42+
reject [shape=box, label="Reject"];
43+
44+
decision_income -> reject;
45+
decision_employment -> reject;
46+
decision_down_payment -> reject;
47+
}
48+
---
49+
>> Create a flow that makes a series of decisions about whether to recommend a job interview candidate.
50+
digraph G {
51+
52+
// start
53+
start [shape=ellipse, label="Start"];
54+
55+
// decision_experience
56+
start -> decision_experience;
57+
decision_experience [shape=Mdiamond, label="Has relevant experience?"];
58+
59+
// decision_education
60+
decision_experience -> decision_education;
61+
decision_education [shape=Mdiamond, label="Has required education?"];
62+
63+
// decision_skills
64+
decision_education -> decision_skills;
65+
decision_skills [shape=Mdiamond, label="Has necessary skills?"];
66+
67+
// decision_references
68+
decision_skills -> decision_references;
69+
decision_references [shape=Mdiamond, label="Has positive references?"];
70+
71+
// recommend
72+
decision_references -> recommend;
73+
recommend [shape=box, label="Recommend for interview"];
74+
75+
// reject
76+
decision_experience -> reject;
77+
reject [shape=box, label="Reject"];
78+
79+
decision_education -> reject;
80+
decision_skills -> reject;
81+
decision_references -> reject;
82+
}
83+
---
84+
>> Create a flow that makes a series of decisions about an animal, to decide what kind of animal is it
85+
digraph G {
86+
87+
// start
88+
start [shape=ellipse, label="Start"];
89+
90+
// decision_has_feathers
91+
start -> decision_has_feathers;
92+
decision_has_feathers [shape=Mdiamond, label="Has feathers?"];
93+
94+
// decision_can_fly
95+
decision_has_feathers -> decision_can_fly;
96+
decision_can_fly [shape=Mdiamond, label="Can fly?"];
97+
98+
// decision_has_fins
99+
decision_has_feathers -> decision_has_fins;
100+
decision_has_fins [shape=Mdiamond, label="Has fins?"];
101+
102+
// Hawk
103+
decision_can_fly -> Hawk;
104+
Hawk [shape=box, label="Hawk"];
105+
106+
// Penguin
107+
decision_can_fly -> Penguin;
108+
Penguin [shape=box, label="Penguin"];
109+
110+
// Dolphin
111+
decision_has_fins -> Dolphin;
112+
Dolphin [shape=box, label="Dolphin"];
113+
114+
// Bear
115+
decision_has_fins -> Bear;
116+
Bear [shape=box, label="Bear"];
117+
}
118+
[[[TEST Simple workflow adding an item to a list]]]
119+
---
120+
>> Create a flow that takes a list and adds an item of the same type
121+
digraph G {
122+
123+
// start
124+
start [shape=ellipse, label="Start"];
125+
126+
// add_item
127+
add_item [shape=box, label="Add Item"];
128+
129+
// end
130+
end [shape=ellipse, label="End"];
131+
132+
// start -> add_item
133+
start -> add_item;
134+
135+
// add_item -> end
136+
add_item -> end;
137+
138+
add_item [shape=box, label="Add Item"];
139+
}
140+
---
141+
>> Create a flow that takes two lists and concatenates them
142+
digraph G {
143+
144+
// start
145+
start [shape=ellipse, label="Start"];
146+
147+
// concatenate_lists
148+
concatenate_lists [shape=box, label="Concatenate Lists"];
149+
150+
// end
151+
end [shape=ellipse, label="End"];
152+
153+
// start -> concatenate_lists
154+
start -> concatenate_lists;
155+
156+
// concatenate_lists -> end
157+
concatenate_lists -> end;
158+
159+
concatenate_lists [shape=box, label="Concatenate Lists"];
160+
}
161+
---
162+
>> Create a flow that takes a list and an object. Call another flow to get a boolean result. If the boolean is true, then add the item to the list.
163+
digraph G {
164+
165+
// start
166+
start [shape=ellipse, label="Start"];
167+
168+
// call_flow
169+
call_flow [shape=box, label="Call Flow"];
170+
171+
// decision_boolean
172+
decision_boolean [shape=diamond, label="Boolean Result?"];
173+
174+
// add_item
175+
add_item [shape=box, label="Add Item"];
176+
177+
// end
178+
end [shape=ellipse, label="End"];
179+
180+
// start -> call_flow
181+
start -> call_flow;
182+
183+
// call_flow -> decision_boolean
184+
call_flow -> decision_boolean;
185+
186+
// decision_boolean -> add_item [label="true"];
187+
decision_boolean -> add_item [label="true"];
188+
189+
// decision_boolean -> end [label="false"];
190+
decision_boolean -> end [label="false"];
191+
192+
// add_item -> end
193+
add_item -> end;
194+
195+
call_flow [shape=box, label="Call Flow"];
196+
}
197+
[[[TEST Irrelevant prompts]]]
198+
---
199+
>> what is 2 + 5 divided by 10 ?
200+
I'm sorry, but I can only assist with questions related to creating a flow chart.
201+
---
202+
>> Who won the battle of Agincourt, and why was it fought?
203+
I'm sorry, but I can only assist with questions related to creating a flow chart.
204+
---
205+
>> What is my favourite color?
206+
I'm sorry, but I don't have access to personal information.
207+
```
208+
209+
## Dependencies
210+
211+
- Requires an LLM - by default, uses OpenAI's ChatGPT.
212+
- Python 3
213+
- [graphviz](https://www.graphviz.org/#download)
214+
215+
## Usage
216+
217+
To use as a CLI (Command Line Interface) REPL (Read-Eval-Print Loop) prompt:
218+
```go.sh```
219+
220+
or to use as a web server:
221+
222+
```go_web.sh```
223+
224+
For the web server, you need to pass the user prompt as the GET query parameter 'p'.
225+
226+
Example:
227+
228+
- http://localhost:8083/?p=I%20need%20to%20make%20a%20Car%20Parts%20application
229+
230+
So, another application can use the web server to send in natural language prompts from the user, and receive the response in graphviz DOT format.
231+
232+
The other application can then generate an image or some kind of workflow, from the DOT script.
233+
234+
## Set up
235+
236+
```
237+
pip3 install --upgrade openai pydot
238+
```
239+
240+
Set environment variable with your OpenAI key:
241+
242+
```
243+
export OPENAI_API_KEY="xxx"
244+
```
245+
246+
Add that to your shell initializing script (`~/.zprofile` or similar)
247+
248+
Load in current terminal:
249+
250+
```
251+
source ~/.zprofile
252+
```
253+
254+
## Test
255+
256+
`test.sh`
257+
258+
or
259+
260+
`python test.py`

command.py

+5
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
class Command:
    """A named "expert": pairs an LLM prompt template with a description of what it handles."""

    def __init__(self, name: str, expert_template: str, description: str) -> None:
        # Identifier for this expert command.
        self.name = name
        # System-prompt text that defines this expert's behaviour.
        self.expert_template = expert_template
        # Human-readable summary of what this expert is good for.
        self.description = description

config.py

+1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
# Global debug flag — presumably enables extra diagnostic output when True; confirm in consuming modules.
is_debug = False

config__database_schema_creator.py

+42
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,42 @@
1+
from command import Command

# NOTE: Instead of this steps-in-single-prompt approach, we could have an inter-dependent chain,
# to collect info about the app, THEN try to generate.
# BUT the step-by-step approach works really well, at least with Chat-GPT3.5 Turbo.

# System prompt for the DOT-flowchart "expert": restricts the LLM to flow-chart
# questions and requires graphviz DOT output, shaped like the embedded example.
create_dot_flowchart__expert_template = """You are Workflow Creator Bot, a bot that knows how to create a simple DOT format flow chart.
You are great at answering questions about creating and altering a flow chart.

When you don't know the answer to a question, do not answer.

You are an AI assistant to assist an application developer with the creation of the flow chart via natural language input.

The output MUST be in DOT format as used by the graphviz tool only, based on the following example:
```
digraph G {

// decision_has_feathers
start -> decision_has_feathers;
decision_has_feathers -> decision_can_fly
decision_has_feathers -> decision_has_fins

// decision_can_fly
decision_can_fly -> Hawk
decision_can_fly -> Penguin

// decision_has_fins
decision_has_fins -> Dolphin
decision_has_fins -> Bear

decision_has_feathers [shape=Mdiamond, label="Has feathers?"];
decision_can_fly [shape=Mdiamond, label="Can fly?"];
decision_has_fins [shape=Mdiamond, label="Has fins?"];
}
```

IMPORTANT: Only output valid DOT format as used by the graphviz tool.
"""

# Each expert is a prompt that knows how to handle one type of user input.
# These become 'system' messages via core.create_command_messages().
EXPERT_COMMANDS = [
    Command('create_dot_workflow', create_dot_flowchart__expert_template, "Good for answering questions about creating a workflow in DOT notation")
]

config_web.py

+5
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
# Host and port the web server binds to (README example: http://localhost:8083/?p=...).
HOSTNAME = "localhost"
PORT = 8083
# Thread-pool size for the web server — presumably consumed by main_web_service.py; confirm there.
WEB_SERVER_THREADS = 100

# Debug flag — presumably enables extra diagnostic output when True; confirm in consuming modules.
is_debug = False

core.py

+26
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
import service_chat
2+
3+
# DEV note: NOT using langchain.
4+
# Tried using langchain but its validation gets in the way of more complex prompts.
5+
# And it seems simpler to code directly, not via a complicated framework.
6+
7+
def create_command_messages(expert_commands):
    """Return one 'system' chat message per expert command, preserving order.

    Each message's content is the command's expert_template (the system
    prompt that defines that expert's behaviour).
    """
    return [
        {'role': 'system', 'content': cmd.expert_template}
        for cmd in expert_commands
    ]
13+
14+
def execute_prompt(user_prompt, previous_messages, command_messages):
    """Send the user's prompt to the LLM and record the exchange.

    The request is assembled as: expert system messages, then the prior
    conversation, then the new user message. The user message and the
    assistant's reply are appended to previous_messages (mutated in
    place), and the raw assistant response is returned.
    """
    # TODO: route to the right 'expert' chain.
    # For now this falls back to the default chain: the plain user prompt
    # is sent to the LLM together with all expert system messages.
    user_message = {'role': 'user', 'content': user_prompt}

    rsp = service_chat.send_prompt_messages(
        command_messages + previous_messages + [user_message]
    )

    previous_messages.extend((
        user_message,
        {'role': 'assistant', 'content': rsp},
    ))
    return rsp

go.sh

+1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
# Start the CLI REPL (see README "Usage").
# Use python3 explicitly: go_web.sh already does, and the README requires Python 3 —
# a bare `python` may be missing or point at Python 2 on some systems.
python3 -W "ignore" main_cli.py

go_web.sh

+1
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
# Start the web server (main_web_service.py); per the README, pass the user
# prompt as GET query parameter 'p'.
python3 -W "ignore" main_web_service.py

0 commit comments

Comments
 (0)