diff --git a/cortex/ask.py b/cortex/ask.py
index 33c06351..dd4643b5 100644
--- a/cortex/ask.py
+++ b/cortex/ask.py
@@ -10,6 +10,7 @@
 import shutil
 import sqlite3
 import subprocess
+from pathlib import Path
 
 from typing import Any
 
@@ -169,11 +170,47 @@ def _default_model(self) -> str:
         elif self.provider == "claude":
             return "claude-sonnet-4-20250514"
         elif self.provider == "ollama":
-            return "llama3.2"
+            return self._get_ollama_model()
         elif self.provider == "fake":
             return "fake"
         return "gpt-4"
 
+    def _get_ollama_model(self) -> str:
+        """Determine which Ollama model to use.
+
+        The model name is resolved using the following precedence:
+
+        1. If the ``OLLAMA_MODEL`` environment variable is set, its value is
+           returned.
+        2. Otherwise, if ``~/.cortex/config.json`` exists and contains an
+           ``"ollama_model"`` key, that value is returned.
+        3. If neither of the above sources provides a model name, the
+           hard-coded default ``"llama3.2"`` is used.
+
+        Any errors encountered while reading or parsing the configuration
+        file are silently ignored, and the resolution continues to the next
+        step in the precedence chain.
+        """
+        # Try environment variable first
+        env_model = os.environ.get("OLLAMA_MODEL")
+        if env_model:
+            return env_model
+
+        # Try config file
+        try:
+            config_file = Path.home() / ".cortex" / "config.json"
+            if config_file.exists():
+                with open(config_file) as f:
+                    config = json.load(f)
+                model = config.get("ollama_model")
+                if model:
+                    return model
+        except Exception:
+            pass  # Ignore errors reading config
+
+        # Default to llama3.2
+        return "llama3.2"
+
     def _initialize_client(self):
         if self.provider == "openai":
             try:
diff --git a/cortex/dependency_check.py b/cortex/dependency_check.py
index d42e610f..1c070076 100644
--- a/cortex/dependency_check.py
+++ b/cortex/dependency_check.py
@@ -43,7 +43,7 @@ def format_installation_instructions(missing: list[str]) -> str:
 ╰─────────────────────────────────────────────────────────────────╯
 
 Cortex requires the following packages that are not installed:
-  {', '.join(missing)}
+  {", ".join(missing)}
 
 To fix this, run ONE of the following:
 
diff --git a/cortex/first_run_wizard.py b/cortex/first_run_wizard.py
index c31f9fb0..73b8f921 100644
--- a/cortex/first_run_wizard.py
+++ b/cortex/first_run_wizard.py
@@ -371,16 +371,40 @@ def _setup_ollama(self) -> StepResult:
             print("\n✗ Failed to install Ollama")
             return StepResult(success=True, data={"api_provider": "none"})
 
-        # Pull a small model
-        print("\nPulling llama3.2 model (this may take a few minutes)...")
+        # Let user choose model or use default
+        print("\nWhich Ollama model would you like to use?")
+        print("  1. llama3.2 (2GB) - Recommended for most users")
+        print("  2. llama3.2:1b (1.3GB) - Faster, less RAM")
+        print("  3. mistral (4GB) - Alternative quality model")
+        print("  4. phi3 (2.3GB) - Microsoft's efficient model")
+        print("  5. Custom (enter your own)")
+
+        model_choices = {
+            "1": "llama3.2",
+            "2": "llama3.2:1b",
+            "3": "mistral",
+            "4": "phi3",
+        }
+
+        choice = self._prompt("\nEnter choice [1]: ", default="1")
+
+        if choice == "5":
+            model_name = self._prompt("Enter model name: ", default="llama3.2")
+        else:
+            model_name = model_choices.get(choice, "llama3.2")
+
+        # Pull the selected model
+        print(f"\nPulling {model_name} model (this may take a few minutes)...")
         try:
-            subprocess.run(["ollama", "pull", "llama3.2"], check=True)
+            subprocess.run(["ollama", "pull", model_name], check=True)
             print("\n✓ Model ready!")
         except subprocess.CalledProcessError:
-            print("\n⚠ Could not pull model - you can do this later with: ollama pull llama3.2")
+            print(
+                f"\n⚠ Could not pull model - you can do this later with: ollama pull {model_name}"
+            )
 
         self.config["api_provider"] = "ollama"
-        self.config["ollama_model"] = "llama3.2"
+        self.config["ollama_model"] = model_name
 
         return StepResult(success=True, data={"api_provider": "ollama"})
 
diff --git a/cortex/llm/interpreter.py b/cortex/llm/interpreter.py
index 74870d75..88263028 100644
--- a/cortex/llm/interpreter.py
+++ b/cortex/llm/interpreter.py
@@ -112,7 +112,8 @@ def _initialize_client(self):
                 ollama_base_url = os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
 
                 self.client = OpenAI(
-                    api_key="ollama", base_url=f"{ollama_base_url}/v1"  # Dummy key, not used
+                    api_key="ollama",  # Dummy key, not used
+                    base_url=f"{ollama_base_url}/v1",
                 )
             except ImportError:
                 raise ImportError("OpenAI package not installed. Run: pip install openai")
diff --git a/tests/test_ask.py b/tests/test_ask.py
index 0fe53176..c7cd9306 100644
--- a/tests/test_ask.py
+++ b/tests/test_ask.py
@@ -240,8 +240,31 @@ def test_default_model_claude(self):
 
     def test_default_model_ollama(self):
         """Test default model for Ollama."""
+        # Test resolution from the OLLAMA_MODEL environment variable
+
+        # Save any existing OLLAMA_MODEL so it can be restored afterwards
+        original_model = os.environ.get("OLLAMA_MODEL")
+
+        # Set a custom value and check that it is picked up
+        os.environ["OLLAMA_MODEL"] = "test-model"
         handler = AskHandler(api_key="test", provider="ollama")
-        self.assertEqual(handler.model, "llama3.2")
+        self.assertEqual(handler.model, "test-model")
+
+        # Clean up
+        if original_model is not None:
+            os.environ["OLLAMA_MODEL"] = original_model
+        else:
+            os.environ.pop("OLLAMA_MODEL", None)
+
+        # Test deterministic default behavior when no env var or config file exists.
+        # Point the home directory to a temporary location without ~/.cortex/config.json
+        with (
+            tempfile.TemporaryDirectory() as tmpdir,
+            patch("os.path.expanduser", return_value=tmpdir),
+        ):
+            handler2 = AskHandler(api_key="test", provider="ollama")
+            # When no env var and no config file exist, AskHandler should use its built-in default.
+            self.assertEqual(handler2.model, "llama3.2")
 
     def test_default_model_fake(self):
         """Test default model for fake provider."""
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 1f97bc1a..6ca935f0 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -59,14 +59,16 @@ def test_print_success(self, mock_stdout):
         self.assertTrue(True)
 
     @patch.dict(os.environ, {}, clear=True)
-    def test_install_no_api_key(self):
-        # When no API key is set, the CLI falls back to Ollama.
-        # If Ollama is running, this should succeed. If not, it should fail.
-        # We'll mock Ollama to be unavailable to test the failure case.
-        with patch("cortex.llm.interpreter.CommandInterpreter.parse") as mock_parse:
-            mock_parse.side_effect = RuntimeError("Ollama not available")
-            result = self.cli.install("docker")
-            self.assertEqual(result, 1)
+    @patch("cortex.cli.CommandInterpreter")
+    def test_install_no_api_key(self, mock_interpreter_class):
+        # Should work with Ollama (no API key needed)
+        mock_interpreter = Mock()
+        mock_interpreter.parse.return_value = ["apt update", "apt install docker"]
+        mock_interpreter_class.return_value = mock_interpreter
+
+        result = self.cli.install("docker")
+        # Should succeed with Ollama as fallback provider
+        self.assertEqual(result, 0)
 
     @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True)
     @patch("cortex.cli.CommandInterpreter")
diff --git a/tests/test_ollama_integration.py b/tests/test_ollama_integration.py
index f290c9ed..be7fda4b 100755
--- a/tests/test_ollama_integration.py
+++ b/tests/test_ollama_integration.py
@@ -12,6 +12,7 @@
     python tests/test_ollama_integration.py
 """
 
+import os
 import subprocess
 import sys
 from pathlib import Path
@@ -23,10 +24,34 @@
 from cortex.llm_router import LLMProvider, LLMRouter, TaskType
 
-# Mark all tests to skip if Ollama is not available
+
+def get_available_ollama_model():
+    """Get the first available Ollama model, or None if none available."""
+    try:
+        result = subprocess.run(
+            ["ollama", "list"],
+            capture_output=True,
+            text=True,
+            timeout=5,
+        )
+        if result.returncode == 0:
+            lines = result.stdout.split("\n")[1:]  # Skip header
+            for line in lines:
+                if line.strip():
+                    model_name = line.split()[0]
+                    if model_name:
+                        return model_name
+    except Exception as e:
+        # If there is any error querying Ollama, treat it as "no models available"
+        print(f"Warning: could not list Ollama models: {e}", file=sys.stderr)
+    return None
+
+
+# Mark all tests to skip if Ollama is not available or no models installed
 pytestmark = pytest.mark.skipif(
-    not subprocess.run(["which", "ollama"], capture_output=True).returncode == 0,
-    reason="Ollama is not installed. Install with: python scripts/setup_ollama.py",
+    subprocess.run(["which", "ollama"], capture_output=True).returncode != 0
+    or get_available_ollama_model() is None,
+    reason="Ollama is not installed or no models available. Install with: python scripts/setup_ollama.py",
 )
 
 
@@ -73,11 +98,16 @@ def test_llm_router():
     """Test LLMRouter with Ollama."""
     print("3. Testing LLM Router with Ollama...")
 
+    # Get available model
+    test_model = os.environ.get("OLLAMA_MODEL") or get_available_ollama_model()
+    if not test_model:
+        pytest.skip("No Ollama models available")
+
     try:
         # Initialize router with Ollama
         router = LLMRouter(
             ollama_base_url="http://localhost:11434",
-            ollama_model="llama3.2",
+            ollama_model=test_model,
             default_provider=LLMProvider.OLLAMA,
             enable_fallback=False,  # Don't fall back to cloud APIs
         )
@@ -115,10 +145,15 @@ def test_routing_decision():
     """Test routing logic with Ollama."""
     print("4. Testing routing decision...")
 
+    # Get available model
+    test_model = os.environ.get("OLLAMA_MODEL") or get_available_ollama_model()
+    if not test_model:
+        pytest.skip("No Ollama models available")
+
     try:
         router = LLMRouter(
             ollama_base_url="http://localhost:11434",
-            ollama_model="llama3.2",
+            ollama_model=test_model,
             default_provider=LLMProvider.OLLAMA,
         )
 
@@ -145,10 +180,15 @@ def test_stats_tracking():
     """Test that stats tracking works with Ollama."""
     print("5. Testing stats tracking...")
 
+    # Get available model
+    test_model = os.environ.get("OLLAMA_MODEL") or get_available_ollama_model()
+    if not test_model:
+        pytest.skip("No Ollama models available")
+
     try:
         router = LLMRouter(
             ollama_base_url="http://localhost:11434",
-            ollama_model="llama3.2",
+            ollama_model=test_model,
            default_provider=LLMProvider.OLLAMA,
             track_costs=True,
         )
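
A minimal usage sketch of the model-resolution precedence introduced in cortex/ask.py (illustration only, not part of the patch). It assumes AskHandler is importable from cortex.ask, that the provider's client package is installed, and that constructing the handler does not contact a running Ollama server, as in tests/test_ask.py; the config path and "ollama_model" key match what the first-run wizard now writes.

    import json
    import os
    from pathlib import Path

    from cortex.ask import AskHandler  # assumed import path

    # 1. The OLLAMA_MODEL environment variable takes precedence.
    os.environ["OLLAMA_MODEL"] = "mistral"
    assert AskHandler(api_key="test", provider="ollama").model == "mistral"
    del os.environ["OLLAMA_MODEL"]

    # 2. Otherwise ~/.cortex/config.json is consulted; the first-run wizard
    #    stores the chosen model there under the "ollama_model" key.
    config_path = Path.home() / ".cortex" / "config.json"
    if config_path.exists():
        print(json.loads(config_path.read_text()).get("ollama_model"))

    # 3. With neither source present, the handler falls back to "llama3.2".
    print(AskHandler(api_key="test", provider="ollama").model)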