Commit 13061d2

Merge pull request #1498 from OpenInterpreter/development
Development
2 parents: eded6a2 + 30f5d67 · commit 13061d2

File tree

3 files changed: 162 additions & 31 deletions

interpreter/computer_use/loop.py

Lines changed: 143 additions & 12 deletions
```diff
@@ -7,10 +7,16 @@
 import os
 import platform
 import time
+import traceback
 import uuid
 from collections.abc import Callable
 from datetime import datetime
-from enum import StrEnum
+
+try:
+    from enum import StrEnum
+except ImportError:  # 3.10 compatibility
+    from enum import Enum as StrEnum
+
 from typing import Any, List, cast
 
 import requests
@@ -33,10 +39,19 @@
 
 BETA_FLAG = "computer-use-2024-10-22"
 
+from typing import List, Optional
+
+import uvicorn
+from fastapi import FastAPI
+from fastapi.responses import StreamingResponse
+from pydantic import BaseModel
 from rich import print as rich_print
 from rich.markdown import Markdown
 from rich.rule import Rule
 
+# Add this near the top of the file, with other imports and global variables
+messages: List[BetaMessageParam] = []
+
 
 def print_markdown(message):
     """
```
```diff
@@ -87,7 +102,7 @@ class APIProvider(StrEnum):
 * When using your bash tool with commands that are expected to output very large quantities of text, redirect into a tmp file and use str_replace_editor or `grep -n -B <lines before> -A <lines after> <query> <filename>` to confirm output.
 * When viewing a page it can be helpful to zoom out so that you can see everything on the page. Either that, or make sure you scroll down to see everything before deciding something isn't available.
 * When using your computer function calls, they take a while to run and send back to you. Where possible/feasible, try to chain multiple of these calls all into one function calls request.
-* The current date is {datetime.today().strftime('%A, %B %-d, %Y')}.
+* The current date is {datetime.today().strftime('%A, %B %d, %Y')}.
 </SYSTEM_CAPABILITY>
 
 <IMPORTANT>
@@ -107,6 +122,7 @@ class APIProvider(StrEnum):
 SYSTEM_PROMPT = f"""<SYSTEM_CAPABILITY>
 * You are an AI assistant with access to a virtual machine running on {"Mac OS" if platform.system() == "Darwin" else platform.system()} with internet access.
 * When using your computer function calls, they take a while to run and send back to you. Where possible/feasible, try to chain multiple of these calls all into one function calls request.
+* The current date is {datetime.today().strftime('%A, %B %d, %Y')}.
 </SYSTEM_CAPABILITY>"""
 
 # Update the SYSTEM_PROMPT for Mac OS
@@ -175,6 +191,8 @@ async def sampling_loop(
             elif isinstance(chunk, BetaRawContentBlockDeltaEvent):
                 if chunk.delta.type == "text_delta":
                     print(f"{chunk.delta.text}", end="", flush=True)
+                    yield {"type": "chunk", "chunk": chunk.delta.text}
+                    await asyncio.sleep(0)
                     if current_block and current_block.type == "text":
                         current_block.text += chunk.delta.text
                 elif chunk.delta.type == "input_json_delta":
@@ -189,10 +207,13 @@
                     # Finished a tool call
                     # print()
                     current_block.input = json.loads(current_block.partial_json)
+                    # yield {"type": "chunk", "chunk": current_block.input}
                     delattr(current_block, "partial_json")
                 else:
                     # Finished a message
                     print("\n")
+                    yield {"type": "chunk", "chunk": "\n"}
+                    await asyncio.sleep(0)
                     response_content.append(current_block)
                     current_block = None
 
@@ -231,7 +252,9 @@ async def sampling_loop(
                 tool_output_callback(result, content_block.id)
 
         if not tool_result_content:
-            return messages
+            # Done!
+            yield {"type": "messages", "messages": messages}
+            break
 
         messages.append({"content": tool_result_content, "role": "user"})
 
```
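One detail behind the hunks above: adding `yield` turns `sampling_loop` into an async generator, and Python rejects `return <value>` inside an async generator with a `SyntaxError`, which is why `return messages` becomes a final `{"type": "messages"}` event followed by `break`. A minimal, self-contained sketch of the resulting consumption pattern (names and payloads are illustrative, not the real signature):

```python
import asyncio


async def sampling_loop_sketch():
    # Stream text as it is produced...
    yield {"type": "chunk", "chunk": "hello"}
    # ...then signal completion by yielding the final state and stopping.
    # (`return messages` would be a SyntaxError in an async generator.)
    yield {"type": "messages", "messages": [{"role": "assistant", "content": "hello"}]}


async def consume():
    messages = []
    async for event in sampling_loop_sketch():
        if event["type"] == "chunk":
            print(event["chunk"], end="", flush=True)
        elif event["type"] == "messages":
            messages = event["messages"]
    print("\nfinal message count:", len(messages))


asyncio.run(consume())
```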

```diff
@@ -334,6 +357,95 @@ async def main():
     provider = APIProvider.ANTHROPIC
     system_prompt_suffix = ""
 
+    # Check if running in server mode
+    if "--server" in sys.argv:
+        app = FastAPI()
+
+        # Start the mouse position checking thread when in server mode
+        mouse_thread = threading.Thread(target=check_mouse_position)
+        mouse_thread.daemon = True
+        mouse_thread.start()
+
+        # Get API key from environment variable
+        api_key = os.environ.get("ANTHROPIC_API_KEY")
+        if not api_key:
+            raise ValueError(
+                "ANTHROPIC_API_KEY environment variable must be set when running in server mode"
+            )
+
+        @app.post("/openai/chat/completions")
+        async def chat_completion(request: ChatCompletionRequest):
+            print("BRAND NEW REQUEST")
+            # Check exit flag before processing request
+            if exit_flag:
+                return {"error": "Server shutting down due to mouse in corner"}
+
+            async def stream_response():
+                print("is this even happening")
+
+                # Instead of creating converted_messages, append the last message to global messages
+                global messages
+                messages.append(
+                    {
+                        "role": request.messages[-1].role,
+                        "content": [
+                            {"type": "text", "text": request.messages[-1].content}
+                        ],
+                    }
+                )
+
+                response_chunks = []
+
+                async def output_callback(content_block: BetaContentBlock):
+                    chunk = f"data: {json.dumps({'choices': [{'delta': {'content': content_block.text}}]})}\n\n"
+                    response_chunks.append(chunk)
+                    yield chunk
+
+                async def tool_output_callback(result: ToolResult, tool_id: str):
+                    if result.output or result.error:
+                        content = result.output if result.output else result.error
+                        chunk = f"data: {json.dumps({'choices': [{'delta': {'content': content}}]})}\n\n"
+                        response_chunks.append(chunk)
+                        yield chunk
+
+                try:
+                    yield f"data: {json.dumps({'choices': [{'delta': {'role': 'assistant'}}]})}\n\n"
+
+                    messages = [m for m in messages if m["content"]]
+                    print(str(messages)[-100:])
+                    await asyncio.sleep(4)
+
+                    async for chunk in sampling_loop(
+                        model=model,
+                        provider=provider,
+                        system_prompt_suffix=system_prompt_suffix,
+                        messages=messages,  # Now using global messages
+                        output_callback=output_callback,
+                        tool_output_callback=tool_output_callback,
+                        api_key=api_key,
+                    ):
+                        if chunk["type"] == "chunk":
+                            await asyncio.sleep(0)
+                            yield f"data: {json.dumps({'choices': [{'delta': {'content': chunk['chunk']}}]})}\n\n"
+                        if chunk["type"] == "messages":
+                            messages = chunk["messages"]
+
+                    yield f"data: {json.dumps({'choices': [{'delta': {'content': '', 'finish_reason': 'stop'}}]})}\n\n"
+
+                except Exception as e:
+                    print("Error: An exception occurred.")
+                    print(traceback.format_exc())
+                    pass
+                    # raise
+                    # print(f"Error: {e}")
+                    # yield f"data: {json.dumps({'error': str(e)})}\n\n"
+
+            return StreamingResponse(stream_response(), media_type="text/event-stream")
+
+        # Instead of running uvicorn here, we'll return the app
+        return app
+
+    # Original CLI code continues here...
     print()
     print_markdown("Welcome to **Open Interpreter**.\n")
     print_markdown("---")
@@ -351,20 +463,22 @@ async def main():
     import random
 
     tips = [
-        "You can type `i` in your terminal to use Open Interpreter.",
-        "Type `wtf` in your terminal to have Open Interpreter fix the last error.",
-        "You can type prompts after `i` in your terminal, for example, `i want you to install node`. (Yes, really.)",
+        # "You can type `i` in your terminal to use Open Interpreter.",
+        "**Tip:** Type `wtf` in your terminal to have Open Interpreter fix the last error.",
+        # "You can type prompts after `i` in your terminal, for example, `i want you to install node`. (Yes, really.)",
+        "We recommend using our desktop app for the best experience. Type `d` for early access.",
+        "**Tip:** Reduce display resolution for better performance.",
     ]
 
     random_tip = random.choice(tips)
 
     markdown_text = f"""> Model set to `Claude 3.5 Sonnet (New)`, OS control enabled
 
-We recommend using our desktop app for the best experience. Type `d` for early access.
+{random_tip}
 
 **Warning:** This AI has full system access and can modify files, install software, and execute commands. By continuing, you accept all risks and responsibility.
 
-Move your mouse to any corner of the screen to exit. Reduce display resolution for better performance.
+Move your mouse to any corner of the screen to exit.
 """
 
     print_markdown(markdown_text)
@@ -411,23 +525,30 @@ def tool_output_callback(result: ToolResult, tool_id: str):
             print(f"---\n{result.error}\n---")
 
     try:
-        messages = await sampling_loop(
+        async for chunk in sampling_loop(
             model=model,
             provider=provider,
             system_prompt_suffix=system_prompt_suffix,
             messages=messages,
             output_callback=output_callback,
             tool_output_callback=tool_output_callback,
             api_key=api_key,
-        )
+        ):
+            if chunk["type"] == "messages":
+                messages = chunk["messages"]
     except Exception as e:
-        print(f"An error occurred: {e}")
+        raise
 
     # The thread will automatically terminate when the main program exits
 
 
 def run_async_main():
-    asyncio.run(main())
+    if "--server" in sys.argv:
+        # Start uvicorn server directly without asyncio.run()
+        app = asyncio.run(main())
+        uvicorn.run(app, host="0.0.0.0", port=8000)
+    else:
+        asyncio.run(main())
 
 
 if __name__ == "__main__":
@@ -463,3 +584,13 @@ def check_mouse_position():
             print("\nMouse moved to corner. Exiting...")
             os._exit(0)
         threading.Event().wait(0.1)  # Check every 100ms
+
+
+class ChatMessage(BaseModel):
+    role: str
+    content: str
+
+
+class ChatCompletionRequest(BaseModel):
+    messages: List[ChatMessage]
+    stream: Optional[bool] = False
```
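For reference, a minimal client sketch for the new server mode (this assumes the process was started with the `--server` flag so `run_async_main` serves the FastAPI app on port 8000, and that the `requests` package is installed; the prompt text is illustrative):

```python
import json

import requests

response = requests.post(
    "http://localhost:8000/openai/chat/completions",
    json={
        "messages": [{"role": "user", "content": "What time is it?"}],
        "stream": True,
    },
    stream=True,
)

# The endpoint streams OpenAI-style server-sent events: each non-empty
# line looks like `data: {"choices": [{"delta": {...}}]}`, ending with a
# delta that carries finish_reason "stop".
for line in response.iter_lines():
    if not line:
        continue
    payload = json.loads(line.decode().removeprefix("data: "))
    delta = payload["choices"][0]["delta"]
    print(delta.get("content", ""), end="", flush=True)
```

Note that the server reads `ANTHROPIC_API_KEY` from its own environment (and refuses to start without it), so the client sends no credentials.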

poetry.lock

Lines changed: 16 additions & 16 deletions
Some generated files are not rendered by default.
