34
34
from pathlib import Path
35
35
from typing import TYPE_CHECKING , List , Optional
36
36
37
+ from textual .containers import Container
37
38
from textual .css .query import QueryError
38
39
from textual .worker import Worker , WorkerState
39
40
from textual .widgets import Input , RichLog , TextArea
57
58
"handle_vllm_browse_python_button_pressed" ,
58
59
"handle_vllm_browse_model_button_pressed" ,
59
60
"handle_start_vllm_server_button_pressed" ,
61
+ "handle_stop_vllm_server_button_pressed" ,
60
62
# ─── Model download ───────────────────────────────────────────────────────
61
63
"handle_browse_models_dir_button_pressed" ,
62
64
"handle_start_model_download_button_pressed" ,
65
+ # ─── Ollama ───────────────────────────────────────────────────────────────
66
+ "handle_ollama_nav_button_pressed" ,
63
67
]
64
68
65
69
###############################################################################
@@ -220,6 +224,7 @@ def run_vllm_server_worker(app_instance: "TldwCli", command: List[str]):
220
224
221
225
logger = getattr (app_instance , "loguru_logger" , logging .getLogger (__name__ ))
222
226
logger .info ("vLLM worker begins: %s" , " " .join (command ))
227
+ app_instance .vllm_server_process = None # Clear any old process reference
223
228
224
229
try :
225
230
process = subprocess .Popen (
@@ -230,6 +235,7 @@ def run_vllm_server_worker(app_instance: "TldwCli", command: List[str]):
230
235
bufsize = 1 ,
231
236
universal_newlines = True ,
232
237
)
238
+ app_instance .vllm_server_process = process # Store the process
233
239
234
240
app_instance .call_from_thread (
235
241
app_instance ._update_vllm_log , f"vLLM server started (PID: { process .pid } )…\n "
@@ -238,15 +244,19 @@ def run_vllm_server_worker(app_instance: "TldwCli", command: List[str]):
238
244
process .wait ()
239
245
yield f"vLLM server exited with code: { process .returncode } \n "
240
246
except FileNotFoundError :
247
+ app_instance .vllm_server_process = None # Clear process on error
241
248
msg = f"ERROR: vLLM interpreter not found: { command [0 ]} \n "
242
249
logger .error (msg .rstrip ())
243
250
app_instance .call_from_thread (app_instance ._update_vllm_log , msg )
244
251
yield msg
245
252
except Exception as err :
253
+ app_instance .vllm_server_process = None # Clear process on error
246
254
msg = f"ERROR in vLLM worker: { err } \n "
247
255
logger .error (msg .rstrip (), exc_info = True )
248
256
app_instance .call_from_thread (app_instance ._update_vllm_log , msg )
249
257
yield msg
258
+ finally :
259
+ app_instance .vllm_server_process = None # Ensure process is cleared
250
260
251
261
252
262
def run_model_download_worker (app_instance : "TldwCli" , command : List [str ]):
@@ -590,6 +600,48 @@ async def handle_start_vllm_server_button_pressed(app: "TldwCli") -> None:
590
600
app .notify ("Error setting up vLLM server start." , severity = "error" )
591
601
592
602
603
async def handle_stop_vllm_server_button_pressed(app: "TldwCli") -> None:
    """Stop the vLLM server process, if one is running.

    Attempts a graceful ``terminate()`` first and escalates to ``kill()``
    if the process has not exited within 10 seconds. Always clears
    ``app.vllm_server_process`` afterwards so no stale handle remains.

    Args:
        app: The running TldwCli application instance holding the
            ``vllm_server_process`` attribute and the UI widgets.
    """
    logger = getattr(app, "loguru_logger", logging.getLogger(__name__))
    logger.info("User requested to stop vLLM server.")

    # The log pane is optional here: stopping the server must still work
    # even if the widget cannot be found (QueryError is handled the same
    # way as in the other handlers of this module).
    try:
        log_output_widget = app.query_one("#vllm-log-output", RichLog)
    except QueryError:  # pragma: no cover
        logger.warning("Could not find #vllm-log-output; continuing without UI log.")
        log_output_widget = None

    def _log(message: str) -> None:
        # Mirror status messages into the RichLog pane when it is available.
        if log_output_widget is not None:
            log_output_widget.write(message)

    process = getattr(app, "vllm_server_process", None)
    if not process:
        # Nothing was ever started (or it was already cleaned up).
        logger.info("No vLLM server process found to stop.")
        _log("vLLM server is not currently running.\n")
        app.notify("vLLM server is not running.", severity="warning")
        return

    if process.poll() is not None:
        # A handle exists but the process already exited — drop the stale reference.
        logger.info("vLLM server process was found but is not running.")
        _log("vLLM server is not currently running.\n")
        app.notify("vLLM server is not running.", severity="warning")
        app.vllm_server_process = None
        return

    logger.info(f"Stopping vLLM server process (PID: {process.pid}).")
    _log(f"Stopping vLLM server (PID: {process.pid})...\n")
    process.terminate()
    try:
        process.wait(timeout=10)  # Give the server time to shut down cleanly.
        logger.info("vLLM server process terminated.")
        _log("vLLM server stopped.\n")
        app.notify("vLLM server stopped.")
    except subprocess.TimeoutExpired:
        logger.warning("Timeout waiting for vLLM server to terminate. Killing.")
        _log("vLLM server did not stop gracefully, killing...\n")
        process.kill()
        process.wait()  # Reap the killed process so it cannot become a zombie.
        _log("vLLM server killed.\n")
        app.notify("vLLM server killed after timeout.", severity="warning")
    except Exception as e:  # pylint: disable=broad-except
        logger.error(f"Error during vLLM server termination: {e}", exc_info=True)
        _log(f"Error stopping vLLM server: {e}\n")
        app.notify(f"Error stopping vLLM server: {e}", severity="error")
    finally:
        app.vllm_server_process = None  # Never leave a stale handle behind.
644
+
593
645
###############################################################################
594
646
# ─── Model download UI helpers ──────────────────────────────────────────────
595
647
###############################################################################
@@ -666,3 +718,41 @@ async def handle_start_model_download_button_pressed(app: "TldwCli") -> None:
666
718
except Exception as err : # pragma: no cover
667
719
logger .error ("Error preparing model download: %s" , err , exc_info = True )
668
720
app .notify ("Error setting up model download." , severity = "error" )
721
+
722
+
723
+ ###############################################################################
724
+ # ─── Ollama UI helpers ──────────────────────────────────────────────────────
725
+ ###############################################################################
726
+
727
+
728
async def handle_ollama_nav_button_pressed(app: "TldwCli") -> None:
    """Handle the Ollama navigation button press.

    Hides every ``.llm-view-area`` child of ``#llm-content-pane`` (only
    those with an ID, so they can be located again later) and then shows
    ``#llm-view-ollama``, switching the LLM management screen to the
    Ollama view.

    Args:
        app: The running TldwCli application instance.
    """
    logger = getattr(app, "loguru_logger", logging.getLogger(__name__))
    logger.debug("Ollama nav button pressed.")

    try:
        content_pane = app.query_one("#llm-content-pane", Container)
        for view in content_pane.query(".llm-view-area"):
            if view.id:  # Only hide views with an ID so they stay addressable.
                logger.debug(f"Hiding view #{view.id}")
                view.styles.display = "none"
            else:  # pragma: no cover
                logger.warning("Found a .llm-view-area without an ID, not hiding it.")

        ollama_view = app.query_one("#llm-view-ollama", Container)
        logger.debug(f"Showing view #{ollama_view.id}")
        ollama_view.styles.display = "block"
    except QueryError as e:  # pragma: no cover
        logger.error(f"QueryError in handle_ollama_nav_button_pressed: {e}", exc_info=True)
        app.notify(
            "Error switching to Ollama view: Could not find required UI elements.",
            severity="error",
        )
    except Exception as e:  # pragma: no cover
        logger.error(f"Unexpected error in handle_ollama_nav_button_pressed: {e}", exc_info=True)
        app.notify(
            "An unexpected error occurred while switching to Ollama view.",
            severity="error",
        )
755
+
756
+ #
757
+ # End of llm_management_events.py
758
+ ########################################################################################################################
0 commit comments