From 874da2cec4f26f2c3b4abdc63267bc8f75de40f8 Mon Sep 17 00:00:00 2001
From: sujay-d07
Date: Sat, 27 Dec 2025 18:30:43 +0530
Subject: [PATCH 01/11] Removed the hardcoded Ollama Models and now works with any Ollama Model

---
 cortex/ask.py                    | 26 ++++++++++++++++-
 cortex/first_run_wizard.py       | 32 ++++++++++++++++----
 tests/test_ask.py                | 22 +++++++++++++-
 tests/test_cli.py                | 13 +++++++++
 tests/test_ollama_integration.py | 50 ++++++++++++++++++++++++++++----
 5 files changed, 130 insertions(+), 13 deletions(-)

diff --git a/cortex/ask.py b/cortex/ask.py
index 33c06351..1fe2ff34 100644
--- a/cortex/ask.py
+++ b/cortex/ask.py
@@ -169,11 +169,35 @@ def _default_model(self) -> str:
         elif self.provider == "claude":
             return "claude-sonnet-4-20250514"
         elif self.provider == "ollama":
-            return "llama3.2"
+            return self._get_ollama_model()
         elif self.provider == "fake":
             return "fake"
         return "gpt-4"
 
+    def _get_ollama_model(self) -> str:
+        """Get Ollama model from environment or config file."""
+        # Try environment variable first
+        env_model = os.environ.get("OLLAMA_MODEL")
+        if env_model:
+            return env_model
+
+        # Try config file
+        try:
+            from pathlib import Path
+
+            config_file = Path.home() / ".cortex" / "config.json"
+            if config_file.exists():
+                with open(config_file) as f:
+                    config = json.load(f)
+                model = config.get("ollama_model")
+                if model:
+                    return model
+        except Exception:
+            pass  # Ignore errors reading config
+
+        # Default to llama3.2
+        return "llama3.2"
+
     def _initialize_client(self):
         if self.provider == "openai":
             try:
diff --git a/cortex/first_run_wizard.py b/cortex/first_run_wizard.py
index c31f9fb0..891430c0 100644
--- a/cortex/first_run_wizard.py
+++ b/cortex/first_run_wizard.py
@@ -371,16 +371,38 @@ def _setup_ollama(self) -> StepResult:
                 print("\n✗ Failed to install Ollama")
                 return StepResult(success=True, data={"api_provider": "none"})
 
-        # Pull a small model
-        print("\nPulling llama3.2 model (this may take a few minutes)...")
+        # Let user choose model or use default
+        print("\nWhich Ollama model would you like to use?")
+        print(" 1. llama3.2 (2GB) - Recommended for most users")
+        print(" 2. llama3.2:1b (1.3GB) - Faster, less RAM")
+        print(" 3. mistral (4GB) - Alternative quality model")
+        print(" 4. phi3 (2.3GB) - Microsoft's efficient model")
+        print(" 5. Custom (enter your own)")
+
+        model_choices = {
+            "1": "llama3.2",
+            "2": "llama3.2:1b",
+            "3": "mistral",
+            "4": "phi3",
+        }
+
+        choice = input("\nEnter choice [1]: ").strip() or "1"
+
+        if choice == "5":
+            model_name = input("Enter model name: ").strip() or "llama3.2"
+        else:
+            model_name = model_choices.get(choice, "llama3.2")
+
+        # Pull the selected model
+        print(f"\nPulling {model_name} model (this may take a few minutes)...")
         try:
-            subprocess.run(["ollama", "pull", "llama3.2"], check=True)
+            subprocess.run(["ollama", "pull", model_name], check=True)
             print("\n✓ Model ready!")
         except subprocess.CalledProcessError:
-            print("\n⚠ Could not pull model - you can do this later with: ollama pull llama3.2")
+            print(f"\n⚠ Could not pull model - you can do this later with: ollama pull {model_name}")
 
         self.config["api_provider"] = "ollama"
-        self.config["ollama_model"] = "llama3.2"
+        self.config["ollama_model"] = model_name
 
         return StepResult(success=True, data={"api_provider": "ollama"})
 
diff --git a/tests/test_ask.py b/tests/test_ask.py
index 0fe53176..66bcd066 100644
--- a/tests/test_ask.py
+++ b/tests/test_ask.py
@@ -240,8 +240,28 @@ def test_default_model_claude(self):
 
     def test_default_model_ollama(self):
         """Test default model for Ollama."""
+        # Test with environment variable
+        import os
+
+        # Save and clear any existing OLLAMA_MODEL
+        original_model = os.environ.get("OLLAMA_MODEL")
+
+        # Test with custom env variable
+        os.environ["OLLAMA_MODEL"] = "test-model"
         handler = AskHandler(api_key="test", provider="ollama")
-        self.assertEqual(handler.model, "llama3.2")
+        self.assertEqual(handler.model, "test-model")
+
+        # Clean up
+        if original_model:
+            os.environ["OLLAMA_MODEL"] = original_model
+        else:
+            os.environ.pop("OLLAMA_MODEL", None)
+
+        # Test that it reads from config or defaults
+        # (depends on ~/.cortex/config.json, so just verify it's not None)
+        handler2 = AskHandler(api_key="test", provider="ollama")
+        self.assertIsNotNone(handler2.model)
+        self.assertIsInstance(handler2.model, str)
 
     def test_default_model_fake(self):
         """Test default model for fake provider."""
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 1f97bc1a..f9f82a4b 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -59,6 +59,7 @@ def test_print_success(self, mock_stdout):
         self.assertTrue(True)
 
     @patch.dict(os.environ, {}, clear=True)
+<<<<<<< HEAD
     def test_install_no_api_key(self):
         # When no API key is set, the CLI falls back to Ollama.
         # If Ollama is running, this should succeed. If not, it should fail.
@@ -67,6 +68,18 @@ def test_install_no_api_key(self):
             mock_parse.side_effect = RuntimeError("Ollama not available")
             result = self.cli.install("docker")
             self.assertEqual(result, 1)
+=======
+    @patch("cortex.cli.CommandInterpreter")
+    def test_install_no_api_key(self, mock_interpreter_class):
+        # Should work with Ollama (no API key needed)
+        mock_interpreter = Mock()
+        mock_interpreter.parse.return_value = ["apt update", "apt install docker"]
+        mock_interpreter_class.return_value = mock_interpreter
+
+        result = self.cli.install("docker")
+        # Should succeed with Ollama as fallback provider
+        self.assertEqual(result, 0)
+>>>>>>> a2e81e1 (Removed the hardcoded Ollama Models and now works with any Ollama Model)
 
     @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True)
     @patch("cortex.cli.CommandInterpreter")
diff --git a/tests/test_ollama_integration.py b/tests/test_ollama_integration.py
index f290c9ed..6553b0f4 100755
--- a/tests/test_ollama_integration.py
+++ b/tests/test_ollama_integration.py
@@ -12,6 +12,7 @@
     python tests/test_ollama_integration.py
 """
 
+import os
 import subprocess
 import sys
 from pathlib import Path
@@ -23,10 +24,32 @@
 
 from cortex.llm_router import LLMProvider, LLMRouter, TaskType
 
-# Mark all tests to skip if Ollama is not available
+def get_available_ollama_model():
+    """Get the first available Ollama model, or None if none available."""
+    try:
+        result = subprocess.run(
+            ["ollama", "list"],
+            capture_output=True,
+            text=True,
+            timeout=5,
+        )
+        if result.returncode == 0:
+            lines = result.stdout.split("\n")[1:]  # Skip header
+            for line in lines:
+                if line.strip():
+                    model_name = line.split()[0]
+                    if model_name:
+                        return model_name
+    except Exception:
+        pass
+    return None
+
+
+# Mark all tests to skip if Ollama is not available or no models installed
 pytestmark = pytest.mark.skipif(
-    not subprocess.run(["which", "ollama"], capture_output=True).returncode == 0,
-    reason="Ollama is not installed. Install with: python scripts/setup_ollama.py",
+    not subprocess.run(["which", "ollama"], capture_output=True).returncode == 0
+    or get_available_ollama_model() is None,
+    reason="Ollama is not installed or no models available. Install with: python scripts/setup_ollama.py",
 )
 
 
@@ -73,11 +96,16 @@ def test_llm_router():
     """Test LLMRouter with Ollama."""
     print("3. Testing LLM Router with Ollama...")
 
+    # Get available model
+    test_model = os.environ.get("OLLAMA_MODEL") or get_available_ollama_model()
+    if not test_model:
+        pytest.skip("No Ollama models available")
+
     try:
         # Initialize router with Ollama
         router = LLMRouter(
             ollama_base_url="http://localhost:11434",
-            ollama_model="llama3.2",
+            ollama_model=test_model,
             default_provider=LLMProvider.OLLAMA,
             enable_fallback=False,  # Don't fall back to cloud APIs
         )
@@ -115,10 +143,15 @@ def test_routing_decision():
     """Test routing logic with Ollama."""
     print("4. Testing routing decision...")
 
+    # Get available model
+    test_model = os.environ.get("OLLAMA_MODEL") or get_available_ollama_model()
+    if not test_model:
+        pytest.skip("No Ollama models available")
+
     try:
         router = LLMRouter(
             ollama_base_url="http://localhost:11434",
-            ollama_model="llama3.2",
+            ollama_model=test_model,
             default_provider=LLMProvider.OLLAMA,
         )
 
@@ -145,10 +178,15 @@ def test_stats_tracking():
     """Test that stats tracking works with Ollama."""
     print("5. Testing stats tracking...")
 
+    # Get available model
+    test_model = os.environ.get("OLLAMA_MODEL") or get_available_ollama_model()
+    if not test_model:
+        pytest.skip("No Ollama models available")
+
     try:
         router = LLMRouter(
             ollama_base_url="http://localhost:11434",
-            ollama_model="llama3.2",
+            ollama_model=test_model,
             default_provider=LLMProvider.OLLAMA,
             track_costs=True,
         )

From 8d5d98ae3ea933713544a5669539c466353baf7f Mon Sep 17 00:00:00 2001
From: sujay-d07
Date: Sat, 27 Dec 2025 18:47:42 +0530
Subject: [PATCH 02/11] Refactor code for improved readability by removing unnecessary blank lines in various test files and the first_run_wizard module.

---
 cortex/first_run_wizard.py       | 6 ++++--
 tests/test_ask.py                | 8 ++++----
 tests/test_cli.py                | 2 +-
 tests/test_ollama_integration.py | 1 +
 4 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/cortex/first_run_wizard.py b/cortex/first_run_wizard.py
index 891430c0..d059c41f 100644
--- a/cortex/first_run_wizard.py
+++ b/cortex/first_run_wizard.py
@@ -387,7 +387,7 @@ def _setup_ollama(self) -> StepResult:
         }
 
         choice = input("\nEnter choice [1]: ").strip() or "1"
-
+
         if choice == "5":
             model_name = input("Enter model name: ").strip() or "llama3.2"
         else:
@@ -399,7 +399,9 @@ def _setup_ollama(self) -> StepResult:
             subprocess.run(["ollama", "pull", model_name], check=True)
             print("\n✓ Model ready!")
         except subprocess.CalledProcessError:
-            print(f"\n⚠ Could not pull model - you can do this later with: ollama pull {model_name}")
+            print(
+                f"\n⚠ Could not pull model - you can do this later with: ollama pull {model_name}"
+            )
 
         self.config["api_provider"] = "ollama"
         self.config["ollama_model"] = model_name
diff --git a/tests/test_ask.py b/tests/test_ask.py
index 66bcd066..5897f06a 100644
--- a/tests/test_ask.py
+++ b/tests/test_ask.py
@@ -242,21 +242,21 @@ def test_default_model_ollama(self):
         """Test default model for Ollama."""
         # Test with environment variable
         import os
-
+
         # Save and clear any existing OLLAMA_MODEL
         original_model = os.environ.get("OLLAMA_MODEL")
-
+
         # Test with custom env variable
         os.environ["OLLAMA_MODEL"] = "test-model"
         handler = AskHandler(api_key="test", provider="ollama")
         self.assertEqual(handler.model, "test-model")
-
+
         # Clean up
         if original_model:
             os.environ["OLLAMA_MODEL"] = original_model
         else:
             os.environ.pop("OLLAMA_MODEL", None)
-
+
         # Test that it reads from config or defaults
         # (depends on ~/.cortex/config.json, so just verify it's not None)
         handler2 = AskHandler(api_key="test", provider="ollama")
diff --git a/tests/test_cli.py b/tests/test_cli.py
index f9f82a4b..39a87d4f 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -75,7 +75,7 @@ def test_install_no_api_key(self, mock_interpreter_class):
         mock_interpreter = Mock()
         mock_interpreter.parse.return_value = ["apt update", "apt install docker"]
         mock_interpreter_class.return_value = mock_interpreter
-
+
         result = self.cli.install("docker")
         # Should succeed with Ollama as fallback provider
         self.assertEqual(result, 0)
diff --git a/tests/test_ollama_integration.py b/tests/test_ollama_integration.py
index 6553b0f4..73ae2493 100755
--- a/tests/test_ollama_integration.py
+++ b/tests/test_ollama_integration.py
@@ -24,6 +24,7 @@
 
 from cortex.llm_router import LLMProvider, LLMRouter, TaskType
 
+
 def get_available_ollama_model():
     """Get the first available Ollama model, or None if none available."""
     try:

From 13fbca3520c6e43ace10d30de538b28a8f8e115e Mon Sep 17 00:00:00 2001
From: Sujay <163128998+sujay-d07@users.noreply.github.com>
Date: Sat, 27 Dec 2025 18:50:42 +0530
Subject: [PATCH 03/11] Update tests/test_ask.py

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 tests/test_ask.py | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/tests/test_ask.py b/tests/test_ask.py
index 5897f06a..936f68ab 100644
--- a/tests/test_ask.py
+++ b/tests/test_ask.py
@@ -252,16 +252,17 @@ def test_default_model_ollama(self):
         self.assertEqual(handler.model, "test-model")
 
         # Clean up
-        if original_model:
+        if original_model is not None:
             os.environ["OLLAMA_MODEL"] = original_model
         else:
             os.environ.pop("OLLAMA_MODEL", None)
 
-        # Test that it reads from config or defaults
-        # (depends on ~/.cortex/config.json, so just verify it's not None)
-        handler2 = AskHandler(api_key="test", provider="ollama")
-        self.assertIsNotNone(handler2.model)
-        self.assertIsInstance(handler2.model, str)
+        # Test deterministic default behavior when no env var or config file exists.
+        # Point the home directory to a temporary location without ~/.cortex/config.json
+        with tempfile.TemporaryDirectory() as tmpdir, patch("os.path.expanduser", return_value=tmpdir):
+            handler2 = AskHandler(api_key="test", provider="ollama")
+            # When no env var and no config file exist, AskHandler should use its built-in default.
+            self.assertEqual(handler2.model, "llama3.2")
 
     def test_default_model_fake(self):
         """Test default model for fake provider."""

From 9aa48a9402d1b8add2c44a3748ead49b658e64a1 Mon Sep 17 00:00:00 2001
From: Sujay <163128998+sujay-d07@users.noreply.github.com>
Date: Sat, 27 Dec 2025 18:50:58 +0530
Subject: [PATCH 04/11] Update cortex/ask.py

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 cortex/ask.py | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/cortex/ask.py b/cortex/ask.py
index 1fe2ff34..868b7c31 100644
--- a/cortex/ask.py
+++ b/cortex/ask.py
@@ -175,7 +175,21 @@ def _default_model(self) -> str:
         return "gpt-4"
 
     def _get_ollama_model(self) -> str:
-        """Get Ollama model from environment or config file."""
+        """Determine which Ollama model to use.
+
+        The model name is resolved using the following precedence:
+
+        1. If the ``OLLAMA_MODEL`` environment variable is set, its value is
+           returned.
+        2. Otherwise, if ``~/.cortex/config.json`` exists and contains an
+           ``"ollama_model"`` key, that value is returned.
+        3. If neither of the above sources provides a model name, the
+           hard-coded default ``"llama3.2"`` is used.
+
+        Any errors encountered while reading or parsing the configuration
+        file are silently ignored, and the resolution continues to the next
+        step in the precedence chain.
+        """
         # Try environment variable first
         env_model = os.environ.get("OLLAMA_MODEL")
         if env_model:

From f5bafc04d036c6bd8e92b9ed994ec0f20ca00c6f Mon Sep 17 00:00:00 2001
From: Sujay <163128998+sujay-d07@users.noreply.github.com>
Date: Sat, 27 Dec 2025 18:51:20 +0530
Subject: [PATCH 05/11] Update tests/test_ask.py

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 tests/test_ask.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/tests/test_ask.py b/tests/test_ask.py
index 936f68ab..aa6e4cde 100644
--- a/tests/test_ask.py
+++ b/tests/test_ask.py
@@ -241,7 +241,6 @@ def test_default_model_claude(self):
     def test_default_model_ollama(self):
         """Test default model for Ollama."""
         # Test with environment variable
-        import os
 
         # Save and clear any existing OLLAMA_MODEL
         original_model = os.environ.get("OLLAMA_MODEL")

From 58f989564ec41403189b72943de10a22a9d59dec Mon Sep 17 00:00:00 2001
From: Sujay <163128998+sujay-d07@users.noreply.github.com>
Date: Sat, 27 Dec 2025 18:51:36 +0530
Subject: [PATCH 06/11] Update cortex/first_run_wizard.py

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 cortex/first_run_wizard.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/cortex/first_run_wizard.py b/cortex/first_run_wizard.py
index d059c41f..c70e9ed4 100644
--- a/cortex/first_run_wizard.py
+++ b/cortex/first_run_wizard.py
@@ -386,12 +386,16 @@ def _setup_ollama(self) -> StepResult:
             "4": "phi3",
         }
 
-        choice = input("\nEnter choice [1]: ").strip() or "1"
+        while True:
+            choice = input("\nEnter choice [1]: ").strip() or "1"
+            if choice in model_choices or choice == "5":
+                break
+            print("Invalid choice. Please enter a number between 1 and 5.")
 
         if choice == "5":
             model_name = input("Enter model name: ").strip() or "llama3.2"
         else:
-            model_name = model_choices.get(choice, "llama3.2")
+            model_name = model_choices[choice]
 
         # Pull the selected model
         print(f"\nPulling {model_name} model (this may take a few minutes)...")

From 2ce4dc6272852f31a6f96ea6a3e1aa9dc4cc73a7 Mon Sep 17 00:00:00 2001
From: Sujay <163128998+sujay-d07@users.noreply.github.com>
Date: Sat, 27 Dec 2025 18:53:07 +0530
Subject: [PATCH 07/11] Update tests/test_ollama_integration.py

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
---
 tests/test_ollama_integration.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/tests/test_ollama_integration.py b/tests/test_ollama_integration.py
index 73ae2493..be7fda4b 100755
--- a/tests/test_ollama_integration.py
+++ b/tests/test_ollama_integration.py
@@ -41,8 +41,9 @@ def get_available_ollama_model():
                     model_name = line.split()[0]
                     if model_name:
                         return model_name
-    except Exception:
-        pass
+    except Exception as e:
+        # If there is any error querying Ollama, treat it as "no models available"
+        print(f"Warning: could not list Ollama models: {e}", file=sys.stderr)
     return None
 
 

From 7891b5f9b11430225c3099c2f827323909a2e1ee Mon Sep 17 00:00:00 2001
From: sujay-d07
Date: Sat, 27 Dec 2025 18:56:57 +0530
Subject: [PATCH 08/11] Refactor input handling in first_run_wizard to use _prompt method for consistency

---
 cortex/first_run_wizard.py | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/cortex/first_run_wizard.py b/cortex/first_run_wizard.py
index c70e9ed4..73b8f921 100644
--- a/cortex/first_run_wizard.py
+++ b/cortex/first_run_wizard.py
@@ -386,14 +386,10 @@ def _setup_ollama(self) -> StepResult:
             "4": "phi3",
         }
 
-        while True:
-            choice = input("\nEnter choice [1]: ").strip() or "1"
-            if choice in model_choices or choice == "5":
-                break
-            print("Invalid choice. Please enter a number between 1 and 5.")
+        choice = self._prompt("\nEnter choice [1]: ", default="1")
 
         if choice == "5":
-            model_name = input("Enter model name: ").strip() or "llama3.2"
+            model_name = self._prompt("Enter model name: ", default="llama3.2")
         else:
             model_name = model_choices[choice]
 

From 5fc95f93625162478fd2aea181719d64bdac684e Mon Sep 17 00:00:00 2001
From: sujay-d07
Date: Sat, 27 Dec 2025 19:00:01 +0530
Subject: [PATCH 09/11] .

---
 tests/test_ask.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/tests/test_ask.py b/tests/test_ask.py
index aa6e4cde..c7cd9306 100644
--- a/tests/test_ask.py
+++ b/tests/test_ask.py
@@ -258,7 +258,10 @@ def test_default_model_ollama(self):
 
         # Test deterministic default behavior when no env var or config file exists.
         # Point the home directory to a temporary location without ~/.cortex/config.json
-        with tempfile.TemporaryDirectory() as tmpdir, patch("os.path.expanduser", return_value=tmpdir):
+        with (
+            tempfile.TemporaryDirectory() as tmpdir,
+            patch("os.path.expanduser", return_value=tmpdir),
+        ):
             handler2 = AskHandler(api_key="test", provider="ollama")
             # When no env var and no config file exist, AskHandler should use its built-in default.
             self.assertEqual(handler2.model, "llama3.2")

From 3c3af6aeb772906ebaf92b502ceab1e4bb0cc6ca Mon Sep 17 00:00:00 2001
From: sujay-d07
Date: Tue, 30 Dec 2025 13:18:59 +0530
Subject: [PATCH 10/11] Refactor CLA check script for improved readability and consistency; update dependency check formatting; enhance CLI tests for Ollama fallback behavior

---
 cortex/dependency_check.py |  2 +-
 cortex/llm/interpreter.py  |  3 ++-
 tests/test_cli.py          | 11 -----------
 3 files changed, 3 insertions(+), 13 deletions(-)

diff --git a/cortex/dependency_check.py b/cortex/dependency_check.py
index d42e610f..1c070076 100644
--- a/cortex/dependency_check.py
+++ b/cortex/dependency_check.py
@@ -43,7 +43,7 @@ def format_installation_instructions(missing: list[str]) -> str:
 ╰─────────────────────────────────────────────────────────────────╯
 
 Cortex requires the following packages that are not installed:
-  {', '.join(missing)}
+  {", ".join(missing)}
 
 To fix this, run ONE of the following:
 
diff --git a/cortex/llm/interpreter.py b/cortex/llm/interpreter.py
index 74870d75..88263028 100644
--- a/cortex/llm/interpreter.py
+++ b/cortex/llm/interpreter.py
@@ -112,7 +112,8 @@ def _initialize_client(self):
                 ollama_base_url = os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
 
                 self.client = OpenAI(
-                    api_key="ollama", base_url=f"{ollama_base_url}/v1"  # Dummy key, not used
+                    api_key="ollama",
+                    base_url=f"{ollama_base_url}/v1",  # Dummy key, not used
                 )
             except ImportError:
                 raise ImportError("OpenAI package not installed. Run: pip install openai")
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 39a87d4f..6ca935f0 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -59,16 +59,6 @@ def test_print_success(self, mock_stdout):
         self.assertTrue(True)
 
     @patch.dict(os.environ, {}, clear=True)
-<<<<<<< HEAD
-    def test_install_no_api_key(self):
-        # When no API key is set, the CLI falls back to Ollama.
-        # If Ollama is running, this should succeed. If not, it should fail.
-        # We'll mock Ollama to be unavailable to test the failure case.
-        with patch("cortex.llm.interpreter.CommandInterpreter.parse") as mock_parse:
-            mock_parse.side_effect = RuntimeError("Ollama not available")
-            result = self.cli.install("docker")
-            self.assertEqual(result, 1)
-=======
     @patch("cortex.cli.CommandInterpreter")
     def test_install_no_api_key(self, mock_interpreter_class):
         # Should work with Ollama (no API key needed)
@@ -79,7 +69,6 @@ def test_install_no_api_key(self, mock_interpreter_class):
         result = self.cli.install("docker")
         # Should succeed with Ollama as fallback provider
         self.assertEqual(result, 0)
->>>>>>> a2e81e1 (Removed the hardcoded Ollama Models and now works with any Ollama Model)
 
     @patch.dict(os.environ, {"OPENAI_API_KEY": "sk-test-openai-key-123"}, clear=True)
     @patch("cortex.cli.CommandInterpreter")

From dbea5e0adcaa153c39b681a59c7e25bb9b9422c8 Mon Sep 17 00:00:00 2001
From: sujay-d07
Date: Fri, 2 Jan 2026 12:38:38 +0530
Subject: [PATCH 11/11] Remove redundant import of Path in AskHandler class

---
 cortex/ask.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/cortex/ask.py b/cortex/ask.py
index 868b7c31..dd4643b5 100644
--- a/cortex/ask.py
+++ b/cortex/ask.py
@@ -10,6 +10,7 @@
 import shutil
 import sqlite3
 import subprocess
+from pathlib import Path
 from typing import Any
 
 
@@ -197,8 +198,6 @@ def _get_ollama_model(self) -> str:
 
         # Try config file
         try:
-            from pathlib import Path
-
             config_file = Path.home() / ".cortex" / "config.json"
             if config_file.exists():
                 with open(config_file) as f: