import json
import os
from pathlib import Path

import pytest
from fastapi.testclient import TestClient
import respx

from app.api_app import create_api_app
from app.ui_app import create_ui_app

@pytest.fixture()
def agents_config(tmp_path: Path) -> Path:
    """Write a minimal llama.cpp agents-config JSON file and return its path.

    Both the host and container model paths point at *tmp_path* so the
    config is self-contained within the test's temporary directory.
    """
    config = {
        "image": "ghcr.io/ggml-org/llama.cpp:server-cuda",
        "container_name": "ix-llamacpp-llamacpp-1",
        "host_port": 8071,
        "container_port": 8080,
        "web_ui_url": "http://0.0.0.0:8071/",
        "model_host_path": str(tmp_path),
        "model_container_path": str(tmp_path),
        "models": [],
        "network": "ix-llamacpp_default",
        "subnets": ["172.16.18.0/24"],
        "gpu_count": 2,
        "gpu_name": "NVIDIA RTX 5060 Ti",
    }
    config_path = tmp_path / "agents_config.json"
    config_path.write_text(json.dumps(config), encoding="utf-8")
    return config_path


@pytest.fixture()
def model_dir(tmp_path: Path) -> Path:
    """Populate *tmp_path* with two dummy GGUF model files and return it."""
    for filename, content in (("model-a.gguf", "x"), ("model-b.gguf", "y")):
        (tmp_path / filename).write_text(content, encoding="utf-8")
    return tmp_path


@pytest.fixture()
def api_client(monkeypatch: pytest.MonkeyPatch, agents_config: Path, model_dir: Path):
    """Return a TestClient for the API app, wired to the temp config fixtures.

    Environment variables point the app at the fixture-created config file,
    model directory, and a fake llama.cpp base URL.
    """
    env = {
        "AGENTS_CONFIG_PATH": str(agents_config),
        "MODEL_DIR": str(model_dir),
        "LLAMACPP_BASE_URL": "http://llama.test",
    }
    for key, value in env.items():
        monkeypatch.setenv(key, value)
    return TestClient(create_api_app())


@pytest.fixture()
def ui_client(monkeypatch: pytest.MonkeyPatch, agents_config: Path, model_dir: Path):
    """Return a TestClient for the UI app, wired to the temp config fixtures."""
    env = {
        "AGENTS_CONFIG_PATH": str(agents_config),
        "MODEL_DIR": str(model_dir),
    }
    for key, value in env.items():
        monkeypatch.setenv(key, value)
    return TestClient(create_ui_app())


@pytest.fixture()
def respx_mock():
    """Yield a respx HTTP-mock router that tolerates uncalled routes."""
    with respx.mock(assert_all_called=False) as router:
        yield router