Runtime error

Exit code: 1. Reason: traceback (reconstructed from collapsed log line):

  File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
    return _run_code(code, main_globals, None,
  File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
    exec(code, run_globals)
  File "/usr/local/lib/python3.8/dist-packages/vllm/entrypoints/openai/api_server.py", line 157, in <module>
    engine = AsyncLLMEngine.from_engine_args(
  File "/usr/local/lib/python3.8/dist-packages/vllm/engine/async_llm_engine.py", line 348, in from_engine_args
    engine = cls(
  File "/usr/local/lib/python3.8/dist-packages/vllm/engine/async_llm_engine.py", line 311, in __init__
    self.engine = self._init_engine(*args, **kwargs)
  File "/usr/local/lib/python3.8/dist-packages/vllm/engine/async_llm_engine.py", line 422, in _init_engine
    return engine_class(*args, **kwargs)
  File "/usr/local/lib/python3.8/dist-packages/vllm/engine/llm_engine.py", line 110, in __init__
    self.model_executor = executor_class(model_config, cache_config,
  File "/usr/local/lib/python3.8/dist-packages/vllm/executor/gpu_executor.py", line 37, in __init__
    self._init_worker()
  File "/usr/local/lib/python3.8/dist-packages/vllm/executor/gpu_executor.py", line 45, in _init_worker
    from vllm.worker.worker import Worker
  File "/usr/local/lib/python3.8/dist-packages/vllm/worker/worker.py", line 21, in <module>
    from vllm.worker.model_runner import ModelRunner
  File "/usr/local/lib/python3.8/dist-packages/vllm/worker/model_runner.py", line 17, in <module>
    from vllm.model_executor.model_loader import get_model
  File "/usr/local/lib/python3.8/dist-packages/vllm/model_executor/model_loader.py", line 10, in <module>
    from vllm.model_executor.models.llava import LlavaForConditionalGeneration
  File "/usr/local/lib/python3.8/dist-packages/vllm/model_executor/models/llava.py", line 11, in <module>
    from vllm.model_executor.layers.activation import get_act_fn
  File "/usr/local/lib/python3.8/dist-packages/vllm/model_executor/layers/activation.py", line 9, in <module>
    from vllm._C import ops
ImportError: libcuda.so.1: cannot open shared object file: No such file or directory

Container logs:

Fetching error logs...