Eteims committed on
Commit
17f0653
Β·
verified Β·
1 Parent(s): ed0a70e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +131 -63
app.py CHANGED
@@ -2,10 +2,14 @@ import gradio as gr
2
  from openai import OpenAI
3
  import os
4
  import json
5
- from novita_sandbox.code_interpreter import Sandbox
 
6
  import atexit
 
7
 
8
- # --- Initialization ---
 
 
9
  client = OpenAI(
10
  base_url="https://api.novita.ai/openai",
11
  api_key=os.environ["NOVITA_API_KEY"],
@@ -13,12 +17,55 @@ client = OpenAI(
13
 
14
  model = "meta-llama/llama-3.3-70b-instruct"
15
 
16
- # Create sandbox
17
- sandbox = Sandbox.create(timeout=1200)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
- # --- Tool functions ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  def read_file(path: str):
21
  print(f"[DEBUG] read_file called with path: {path}")
 
 
 
 
22
  try:
23
  content = sandbox.files.read(path)
24
  print(f"[DEBUG] read_file result: {content}")
@@ -29,6 +76,10 @@ def read_file(path: str):
29
 
30
  def write_file(path: str, data: str):
31
  print(f"[DEBUG] write_file called with path: {path}")
 
 
 
 
32
  try:
33
  sandbox.files.write(path, data)
34
  msg = f"File created successfully at {path}"
@@ -40,6 +91,10 @@ def write_file(path: str, data: str):
40
 
41
  def write_files(files: list):
42
  print(f"[DEBUG] write_files called with {len(files)} files")
 
 
 
 
43
  try:
44
  sandbox.files.write_files(files)
45
  msg = f"{len(files)} file(s) created successfully"
@@ -51,15 +106,21 @@ def write_files(files: list):
51
 
52
  def run_commands(command: str):
53
  print(f"[DEBUG] run_commands called with command: {command}")
 
 
 
 
54
  try:
55
  result = sandbox.commands.run(command)
56
- print(f"[DEBUG] run_commands result: {result}")
57
  return result.stdout
58
  except Exception as e:
59
  print(f"[DEBUG] run_commands error: {e}")
60
  return f"Error running command: {e}"
61
 
62
- # --- Register tools ---
 
 
63
  tools = [
64
  {
65
  "type": "function",
@@ -116,33 +177,31 @@ tools = [
116
  "type": "function",
117
  "function": {
118
  "name": "run_commands",
119
- "description": "Run a single shell command inside the sandbox working directory",
120
  "parameters": {
121
  "type": "object",
122
- "properties": {
123
- "command": {"type": "string"},
124
- },
125
  "required": ["command"],
126
  },
127
  },
128
  },
129
  ]
130
 
131
- # --- Persistent chat messages ---
 
 
132
  messages = []
133
 
134
- # --- Global model setter ---
135
  def set_model(selected_model):
136
  global model
137
  model = selected_model
138
- print(f"[DEBUG] Model switched to: {model}")
139
  return f"βœ… Model switched to **{model}**"
140
 
141
  def chat_fn(user_message, history):
142
  global messages, model
 
143
  messages.append({"role": "user", "content": user_message})
144
 
145
- # Send to model
146
  response = client.chat.completions.create(
147
  model=model,
148
  messages=messages,
@@ -152,72 +211,89 @@ def chat_fn(user_message, history):
152
  assistant_msg = response.choices[0].message
153
  messages.append(assistant_msg)
154
 
155
- output_text = ""
156
-
157
  if assistant_msg.tool_calls:
158
  print(f"[DEBUG] Assistant requested {len(assistant_msg.tool_calls)} tool call(s).")
159
 
160
- for tool_call in assistant_msg.tool_calls:
161
- fn_name = tool_call.function.name
162
- fn_args = json.loads(tool_call.function.arguments)
163
- print(f"[DEBUG] Tool call detected: {fn_name} with args {fn_args}")
 
164
 
165
  if fn_name == "read_file":
166
- fn_result = read_file(**fn_args)
167
  elif fn_name == "write_file":
168
- fn_result = write_file(**fn_args)
169
  elif fn_name == "write_files":
170
- fn_result = write_files(**fn_args)
171
  elif fn_name == "run_commands":
172
- fn_result = run_commands(**fn_args)
173
  else:
174
- fn_result = f"Error: Unknown tool {fn_name}"
 
 
175
 
176
  messages.append({
177
- "tool_call_id": tool_call.id,
178
  "role": "tool",
179
- "content": str(fn_result),
180
  })
181
 
182
- follow_up = client.chat.completions.create(
183
  model=model,
184
  messages=messages,
185
  )
186
- final_answer = follow_up.choices[0].message
187
- messages.append(final_answer)
188
- output_text = final_answer.content
189
- else:
190
- output_text = assistant_msg.content
191
 
192
- return output_text
193
 
194
- # --- Command Interface function ---
 
 
195
  def execute_command(command):
196
  if not command.strip():
197
  return "⚠️ Please enter a command."
198
- print(f"[DEBUG] Executing command from interface: {command}")
199
  output = run_commands(command)
200
- return f"```bash\n{output}\n```" if output else "βœ… Command executed (no output)."
201
 
202
- # --- Gradio UI ---
 
 
203
  with gr.Blocks(title="Novita Sandbox App") as demo:
204
  gr.Markdown("## 🧠 Novita Sandbox Agent")
205
- gr.Markdown(
206
- "This app is an AI-powered **code agent** that lets you chat with intelligent assistants backed by **Novita AI LLMs**. These agents can write, read, and execute code safely inside a **Novita sandbox**, providing a secure environment for running commands, testing scripts, and managing files, all through an intuitive chat interface with model selection and command execution built right in."
207
- )
208
-
209
 
210
  with gr.Row(equal_height=True):
211
- # Left: Chat Interface
212
  with gr.Column(scale=2):
213
  gr.Markdown("### πŸ’¬ Chat Interface")
214
  gr.ChatInterface(chat_fn)
215
 
216
- # Right: Command Interface
217
  with gr.Column(scale=1):
218
- gr.Markdown("### πŸ’» Command Interface")
219
-
220
- # Model selector
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
221
  model_selector = gr.Dropdown(
222
  label="Select Model",
223
  choices=[
@@ -228,26 +304,18 @@ with gr.Blocks(title="Novita Sandbox App") as demo:
228
  "moonshotai/kimi-k2-instruct",
229
  ],
230
  value=model,
231
- interactive=True,
232
  )
233
-
234
- model_status = gr.Markdown(f"βœ… Current model: **{model}**")
235
  model_selector.change(set_model, inputs=model_selector, outputs=model_status)
236
 
237
- command_input = gr.Textbox(
238
- label="Command",
239
- placeholder="e.g., ls, python main.py",
240
- lines=1,
241
- )
242
- with gr.Row():
243
- run_btn = gr.Button("Run", variant="primary", scale=0)
244
- command_output = gr.Markdown("Command output will appear here...")
245
-
246
  run_btn.click(execute_command, inputs=command_input, outputs=command_output)
247
 
248
- # --- Cleanup on exit ---
249
- atexit.register(lambda: (sandbox.kill(), print("[DEBUG] Sandbox terminated. πŸ‘‹")))
250
 
251
  if __name__ == "__main__":
252
  demo.launch()
253
-
 
2
  from openai import OpenAI
3
  import os
4
  import json
5
+ import threading
6
+ import time
7
  import atexit
8
+ from novita_sandbox.code_interpreter import Sandbox
9
 
10
+ # -------------------------
11
+ # Global State
12
+ # -------------------------
13
  client = OpenAI(
14
  base_url="https://api.novita.ai/openai",
15
  api_key=os.environ["NOVITA_API_KEY"],
 
17
 
18
  model = "meta-llama/llama-3.3-70b-instruct"
19
 
20
+ sandbox = None
21
+ sandbox_timer = None
22
+
23
+ # -------------------------
24
+ # Sandbox Management
25
+ # -------------------------
26
def create_sandbox():
    """Start the shared sandbox if it is not already running.

    Returns:
        A status string for the UI: the ON badge when a sandbox was
        freshly created, or a notice when one already exists.
    """
    global sandbox
    # Guard clause: never replace a live sandbox.
    if sandbox is not None:
        return "Sandbox already running."
    sandbox = Sandbox.create(timeout=1200)
    print("[DEBUG] Sandbox created.")
    return "🟒 Sandbox ON"
33
+
34
def kill_sandbox():
    """Tear down the shared sandbox if one is running.

    Kill errors are deliberately swallowed (best-effort cleanup) and the
    global handle is always cleared afterwards.

    Returns:
        A status string for the UI.
    """
    global sandbox
    # Guard clause: nothing to do when no sandbox is active.
    if sandbox is None:
        return "Sandbox already off."
    try:
        sandbox.kill()
    except Exception:
        # Best-effort: the sandbox may already be gone server-side.
        pass
    sandbox = None
    print("[DEBUG] Sandbox killed.")
    return "πŸ”΄ Sandbox OFF"
45
 
46
def sandbox_auto_off(target=None):
    """Kill the sandbox after the 1200-second budget expires.

    Runs as the body of a daemon timer thread started when the sandbox is
    switched on.

    Args:
        target: The Sandbox instance this timer is responsible for. When
            provided, the kill is skipped if the global ``sandbox`` has since
            been replaced (manual OFF followed by ON), so a stale timer from
            an earlier toggle cannot destroy a newer sandbox. The default
            ``None`` preserves the original unconditional behaviour for
            existing callers.
    """
    print("[DEBUG] Auto-off countdown started (1200 sec).")
    time.sleep(1200)
    if target is not None and target is not sandbox:
        # Stale timer: the sandbox it was watching is already gone/replaced.
        print("[DEBUG] Auto-off skipped (sandbox was replaced).")
        return
    print("[DEBUG] Auto-off triggered.")
    kill_sandbox()
51
+
52
+ # -------------------------
53
+ # Sandbox Usage Guard
54
+ # -------------------------
55
def require_sandbox():
    """Guard used by the tool functions before touching the sandbox.

    Returns:
        An error string when no sandbox is active, otherwise ``None``.
    """
    if sandbox is not None:
        return None
    return "❌ Sandbox is OFF. Turn it ON to use this feature."
59
+
60
+ # -------------------------
61
+ # Tool Functions
62
+ # -------------------------
63
  def read_file(path: str):
64
  print(f"[DEBUG] read_file called with path: {path}")
65
+ err = require_sandbox()
66
+ if err:
67
+ return err
68
+
69
  try:
70
  content = sandbox.files.read(path)
71
  print(f"[DEBUG] read_file result: {content}")
 
76
 
77
  def write_file(path: str, data: str):
78
  print(f"[DEBUG] write_file called with path: {path}")
79
+ err = require_sandbox()
80
+ if err:
81
+ return err
82
+
83
  try:
84
  sandbox.files.write(path, data)
85
  msg = f"File created successfully at {path}"
 
91
 
92
  def write_files(files: list):
93
  print(f"[DEBUG] write_files called with {len(files)} files")
94
+ err = require_sandbox()
95
+ if err:
96
+ return err
97
+
98
  try:
99
  sandbox.files.write_files(files)
100
  msg = f"{len(files)} file(s) created successfully"
 
106
 
107
def run_commands(command: str):
    """Execute a single shell command in the sandbox and return its stdout.

    Returns:
        The command's stdout on success, the guard message when the sandbox
        is off, or an ``Error running command: ...`` string on failure.
    """
    print(f"[DEBUG] run_commands called with command: {command}")
    err = require_sandbox()
    if err:
        return err

    try:
        output = sandbox.commands.run(command).stdout
        print(f"[DEBUG] run_commands result: {output}")
        return output
    except Exception as e:
        print(f"[DEBUG] run_commands error: {e}")
        return f"Error running command: {e}"
120
 
121
+ # -------------------------
122
+ # Register tools
123
+ # -------------------------
124
  tools = [
125
  {
126
  "type": "function",
 
177
  "type": "function",
178
  "function": {
179
  "name": "run_commands",
180
+ "description": "Run a shell command inside the sandbox",
181
  "parameters": {
182
  "type": "object",
183
+ "properties": {"command": {"type": "string"}},
 
 
184
  "required": ["command"],
185
  },
186
  },
187
  },
188
  ]
189
 
190
+ # -------------------------
191
+ # Chat + Tool Call Debug
192
+ # -------------------------
193
  messages = []
194
 
 
195
def set_model(selected_model):
    """Switch the active LLM and return a confirmation string for the UI."""
    global model
    model = selected_model
    confirmation = f"βœ… Model switched to **{model}**"
    return confirmation
199
 
200
  def chat_fn(user_message, history):
201
  global messages, model
202
+
203
  messages.append({"role": "user", "content": user_message})
204
 
 
205
  response = client.chat.completions.create(
206
  model=model,
207
  messages=messages,
 
211
  assistant_msg = response.choices[0].message
212
  messages.append(assistant_msg)
213
 
214
+ # DEBUG tool call logging
 
215
  if assistant_msg.tool_calls:
216
  print(f"[DEBUG] Assistant requested {len(assistant_msg.tool_calls)} tool call(s).")
217
 
218
+ for tc in assistant_msg.tool_calls:
219
+ print(f"[DEBUG] Tool call detected: {tc.function.name} with args {tc.function.arguments}")
220
+
221
+ fn_name = tc.function.name
222
+ fn_args = json.loads(tc.function.arguments)
223
 
224
  if fn_name == "read_file":
225
+ result = read_file(**fn_args)
226
  elif fn_name == "write_file":
227
+ result = write_file(**fn_args)
228
  elif fn_name == "write_files":
229
+ result = write_files(**fn_args)
230
  elif fn_name == "run_commands":
231
+ result = run_commands(**fn_args)
232
  else:
233
+ result = f"Unknown tool {fn_name}"
234
+
235
+ print(f"[DEBUG] Tool call result: {result}")
236
 
237
  messages.append({
238
+ "tool_call_id": tc.id,
239
  "role": "tool",
240
+ "content": str(result),
241
  })
242
 
243
+ followup = client.chat.completions.create(
244
  model=model,
245
  messages=messages,
246
  )
247
+ final_msg = followup.choices[0].message
248
+ messages.append(final_msg)
249
+ return final_msg.content
 
 
250
 
251
+ return assistant_msg.content
252
 
253
+ # -------------------------
254
+ # Command Interface
255
+ # -------------------------
256
def execute_command(command):
    """Run a command entered in the UI textbox.

    Returns:
        A warning for blank input, otherwise the command output wrapped in
        a fenced markdown code block.
    """
    # Reject blank / whitespace-only input before touching the sandbox.
    if not command.strip():
        return "⚠️ Please enter a command."

    result = run_commands(command)
    return f"```bash\n{result}\n```"
261
 
262
+ # -------------------------
263
+ # UI
264
+ # -------------------------
265
  with gr.Blocks(title="Novita Sandbox App") as demo:
266
  gr.Markdown("## 🧠 Novita Sandbox Agent")
267
+ gr.Markdown("Interact with a Novita sandbox-enabled LLM with code execution capability.")
 
 
 
268
 
269
  with gr.Row(equal_height=True):
270
+ # Chat
271
  with gr.Column(scale=2):
272
  gr.Markdown("### πŸ’¬ Chat Interface")
273
  gr.ChatInterface(chat_fn)
274
 
275
+ # Controls
276
  with gr.Column(scale=1):
277
+ gr.Markdown("### βš™οΈ Controls")
278
+
279
+ # Sandbox Switch
280
+ sandbox_switch = gr.Checkbox(label="Sandbox On/Off", value=False)
281
+ sandbox_status = gr.Markdown("πŸ”΄ Sandbox OFF")
282
+
283
def toggle_sandbox(is_on):
    """Checkbox handler: start or stop the sandbox.

    Args:
        is_on: The checkbox state; True starts the sandbox and its
            auto-off timer thread, False kills the sandbox.

    Returns:
        The status badge string shown next to the switch.

    NOTE(review): every ON toggle starts a fresh daemon timer while
    earlier timers keep running, so a stale timer can still fire after
    1200 s and kill a newer sandbox — confirm, and consider cancelling
    or generation-checking the previous timer.
    """
    global sandbox_timer
    if is_on:
        create_sandbox()
        sandbox_timer = threading.Thread(target=sandbox_auto_off, daemon=True)
        sandbox_timer.start()
        return "🟒 Sandbox ON"
    kill_sandbox()
    return "πŸ”΄ Sandbox OFF"
293
+
294
+ sandbox_switch.change(toggle_sandbox, inputs=sandbox_switch, outputs=sandbox_status)
295
+
296
+ # Model Selector
297
  model_selector = gr.Dropdown(
298
  label="Select Model",
299
  choices=[
 
304
  "moonshotai/kimi-k2-instruct",
305
  ],
306
  value=model,
 
307
  )
308
+ model_status = gr.Markdown(f"Current model: **{model}**")
 
309
  model_selector.change(set_model, inputs=model_selector, outputs=model_status)
310
 
311
+ # Command Runner
312
+ command_input = gr.Textbox(label="Command", placeholder="e.g., ls")
313
+ run_btn = gr.Button("Run", variant="primary")
314
+ command_output = gr.Markdown("Command output here...")
 
 
 
 
 
315
  run_btn.click(execute_command, inputs=command_input, outputs=command_output)
316
 
317
+ # Cleanup
318
+ atexit.register(lambda: (kill_sandbox(), print("[DEBUG] Sandbox terminated.")))
319
 
320
  if __name__ == "__main__":
321
  demo.launch()