3 changes: 2 additions & 1 deletion README.md
@@ -63,7 +63,8 @@ This application allows you to upload files from various sources (local machine,
8. Amazon Bedrock (dev deployed version)
9. Ollama (dev deployed version)
10. Deepseek (dev deployed version)
-11. Other OpenAI-compatible base URL models (dev deployed version)
+11. MiniMax (dev deployed version)
+12. Other OpenAI-compatible base URL models (dev deployed version)


### **Token Usage Tracking**
2 changes: 2 additions & 0 deletions backend/example.env
@@ -43,6 +43,8 @@ LLM_MODEL_CONFIG_fireworks_deepseek_v3="accounts/fireworks/models/deepseek-v3p1,
LLM_MODEL_CONFIG_bedrock_nova_micro_v1="amazon.nova-micro-v1:0,aws_access_key,aws_secret_key,region_name"
LLM_MODEL_CONFIG_bedrock_nova_lite_v1="amazon.nova-lite-v1:0,aws_access_key,aws_secret_key,region_name"
LLM_MODEL_CONFIG_bedrock_nova_pro_v1="amazon.nova-pro-v1:0,aws_access_key,aws_secret_key,region_name"
LLM_MODEL_CONFIG_minimax_m2.7="MiniMax-M2.7,minimax_api_key"
LLM_MODEL_CONFIG_minimax_m2.7_highspeed="MiniMax-M2.7-highspeed,minimax_api_key"
LLM_MODEL_CONFIG_ollama_llama3="llama3_model_name,model_local_url"
TRACK_USER_USAGE="true" #Add this if you want to track token usage
DAILY_TOKENS_LIMIT="250000" #Mandatory if TRACK_USER_USAGE is true
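The two new entries follow the file's existing "model_name,api_key" convention. As a rough illustration of how such a value is consumed (a minimal sketch, not the project's actual lookup code), the string splits into a model name and an API key:

    import os

    # "MiniMax-M2.7,minimax_api_key" -> ("MiniMax-M2.7", "minimax_api_key")
    env_value = os.environ.get("LLM_MODEL_CONFIG_minimax_m2.7", "")
    model_name, api_key = env_value.split(",")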
10 changes: 10 additions & 0 deletions backend/src/llm.py
@@ -110,6 +110,16 @@ def get_llm(model: str):
client=bedrock_client,region_name=region_name, model_id=model_name, model_kwargs=dict(temperature=0),callbacks=callback_manager,
)

elif "MINIMAX" in model:
model_name, api_key = env_value.split(",")
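# MiniMax exposes an OpenAI-compatible chat API (see the tests below),
# so the generic ChatOpenAI client is pointed at the MiniMax base URL.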
llm = ChatOpenAI(
api_key=api_key,
base_url="https://api.minimax.io/v1",
model=model_name,
temperature=0,
callbacks=callback_manager,
)

elif "OLLAMA" in model:
model_name, base_url = env_value.split(",")
llm = ChatOllama(base_url=base_url, model=model_name,callbacks=callback_manager)
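Because the new branch relies only on OpenAI API compatibility, the endpoint can also be smoke-tested outside LangChain with the plain openai client (a minimal sketch, not part of the PR, assuming a valid key in MINIMAX_API_KEY):

    import os
    from openai import OpenAI

    client = OpenAI(
        api_key=os.environ["MINIMAX_API_KEY"],
        base_url="https://api.minimax.io/v1",  # same base URL as the branch above
    )
    resp = client.chat.completions.create(
        model="MiniMax-M2.7",
        messages=[{"role": "user", "content": "Say hello in one word."}],
    )
    print(resp.choices[0].message.content)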
Empty file added backend/tests/__init__.py
20 changes: 20 additions & 0 deletions backend/tests/conftest.py
@@ -0,0 +1,20 @@
"""Test configuration for backend tests."""

import sys
import os

# Ensure the backend directory is on the Python path so that 'src' resolves
# to the project source rather than any system-level 'src' package.
backend_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if backend_dir not in sys.path:
sys.path.insert(0, backend_dir)

# Remove any system-level 'src' module so our project's src takes precedence
if "src" in sys.modules:
existing = sys.modules["src"]
if hasattr(existing, "__file__") and existing.__file__ and "site-packages" in existing.__file__:
del sys.modules["src"]
# Also remove any submodules
for key in list(sys.modules.keys()):
if key.startswith("src."):
del sys.modules[key]
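With this conftest in place, the suites can be run from the backend directory; the integration tests skip themselves when MINIMAX_API_KEY is unset (assumed invocation, matching the docstring in the integration file below):

    pytest tests/ -v
    pytest tests/test_minimax_integration.py -v -m integration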
64 changes: 64 additions & 0 deletions backend/tests/test_minimax_integration.py
@@ -0,0 +1,64 @@
"""Integration tests for MiniMax LLM provider.

These tests verify MiniMax API connectivity and model functionality.
They require a valid MINIMAX_API_KEY environment variable to run.

Run with: pytest tests/test_minimax_integration.py -v -m integration
"""

import os
import pytest

pytestmark = pytest.mark.integration

MINIMAX_API_KEY = os.environ.get("MINIMAX_API_KEY", "")
SKIP_REASON = "MINIMAX_API_KEY not set"


@pytest.mark.skipif(not MINIMAX_API_KEY, reason=SKIP_REASON)
class TestMiniMaxIntegration:
"""Integration tests that call the actual MiniMax API."""

def test_minimax_m27_chat_completion(self):
"""Test a basic chat completion with MiniMax M2.7."""
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
api_key=MINIMAX_API_KEY,
base_url="https://api.minimax.io/v1",
model="MiniMax-M2.7",
temperature=0,
)
response = llm.invoke("What is 2+2? Reply with just the number.")
assert response.content is not None
assert len(response.content) > 0
assert "4" in response.content

def test_minimax_m27_highspeed_chat_completion(self):
"""Test a basic chat completion with MiniMax M2.7-highspeed."""
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
api_key=MINIMAX_API_KEY,
base_url="https://api.minimax.io/v1",
model="MiniMax-M2.7-highspeed",
temperature=0,
)
response = llm.invoke("What is the capital of France? Reply in one word.")
assert response.content is not None
assert "Paris" in response.content

def test_minimax_structured_output_support(self):
"""Test that MiniMax supports structured output via with_structured_output."""
from langchain_openai import ChatOpenAI
from pydantic import BaseModel

llm = ChatOpenAI(
api_key=MINIMAX_API_KEY,
base_url="https://api.minimax.io/v1",
model="MiniMax-M2.7",
temperature=0,
)
# Verify the LLM object has with_structured_output method
# (needed for LLMGraphTransformer to use structured extraction)
assert hasattr(llm, "with_structured_output")
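Note that this last test only asserts that with_structured_output exists. A live structured-output call would look roughly like this (a sketch; the Person schema is hypothetical, and it assumes the MiniMax endpoint supports OpenAI-style tool calling):

    from pydantic import BaseModel

    class Person(BaseModel):
        name: str
        age: int

    # Hypothetical usage; llm is the ChatOpenAI instance built above.
    structured_llm = llm.with_structured_output(Person)
    result = structured_llm.invoke("Alice is 30 years old.")
    # result would be a Person instance, e.g. Person(name="Alice", age=30)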
235 changes: 235 additions & 0 deletions backend/tests/test_minimax_provider.py
@@ -0,0 +1,235 @@
"""Unit tests for MiniMax LLM provider integration.

These tests validate the MiniMax provider configuration, model routing logic,
and ChatOpenAI integration without requiring a running backend or Neo4j instance.
"""

import os
import importlib.util


def _load_llm_module():
"""Load src/llm.py directly by file path to avoid 'src' namespace conflicts."""
llm_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"src", "llm.py"
)
spec = importlib.util.spec_from_file_location("_llm", llm_path)
mod = importlib.util.module_from_spec(spec)
return mod, spec, llm_path


class TestMiniMaxModelRouting:
"""Tests for MiniMax model detection in the get_llm() routing logic."""

def test_minimax_keyword_in_model_string(self):
"""Test that 'MINIMAX' is detected when the model key is uppercased."""
model = "minimax_m2.7"
upper = model.upper().replace(".", "_").strip()
assert "MINIMAX" in upper

def test_minimax_highspeed_keyword_in_model_string(self):
"""Test that 'MINIMAX' is detected for the highspeed variant."""
model = "minimax_m2.7_highspeed"
upper = model.upper().replace(".", "_").strip()
assert "MINIMAX" in upper

def test_minimax_not_confused_with_openai(self):
"""Test MiniMax is not confused with OpenAI models."""
minimax = "minimax_m2.7".upper()
openai = "openai_gpt_5_mini".upper()
assert "MINIMAX" in minimax and "OPENAI" not in minimax
assert "OPENAI" in openai and "MINIMAX" not in openai

def test_minimax_not_confused_with_gemini(self):
"""Test MiniMax is not confused with Gemini models."""
minimax = "minimax_m2.7".upper()
assert "GEMINI" not in minimax

def test_minimax_not_confused_with_anthropic(self):
"""Test MiniMax is not confused with Anthropic models."""
minimax = "minimax_m2.7".upper()
assert "ANTHROPIC" not in minimax

def test_minimax_not_confused_with_fireworks(self):
"""Test MiniMax is not confused with Fireworks models."""
minimax = "minimax_m2.7".upper()
assert "FIREWORKS" not in minimax

def test_minimax_branch_before_fallback(self):
"""Test that MINIMAX matching comes before the else (fallback) branch."""
llm_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"src", "llm.py"
)
with open(llm_path) as f:
content = f.read()
minimax_pos = content.find('"MINIMAX"')
else_pos = content.rfind("else:")
assert minimax_pos > 0, "MINIMAX branch not found in llm.py"
assert minimax_pos < else_pos, "MINIMAX branch should come before the fallback else"


class TestMiniMaxEnvConfig:
"""Tests for MiniMax environment variable configuration format."""

def test_env_key_standard_model(self):
"""Test env key generation for standard MiniMax model."""
model = "minimax_m2.7"
env_key = f"LLM_MODEL_CONFIG_{model.upper().replace('.', '_').strip()}"
assert env_key == "LLM_MODEL_CONFIG_MINIMAX_M2_7"

def test_env_key_highspeed_model(self):
"""Test env key generation for MiniMax highspeed model."""
model = "minimax_m2.7_highspeed"
env_key = f"LLM_MODEL_CONFIG_{model.upper().replace('.', '_').strip()}"
assert env_key == "LLM_MODEL_CONFIG_MINIMAX_M2_7_HIGHSPEED"

def test_env_value_parsing_two_fields(self):
"""Test parsing of 'model_name,api_key' format."""
env_value = "MiniMax-M2.7,sk-test-key-123"
model_name, api_key = env_value.split(",")
assert model_name == "MiniMax-M2.7"
assert api_key == "sk-test-key-123"

def test_env_value_parsing_highspeed(self):
"""Test parsing of highspeed model env value."""
env_value = "MiniMax-M2.7-highspeed,sk-test-key-456"
model_name, api_key = env_value.split(",")
assert model_name == "MiniMax-M2.7-highspeed"
assert api_key == "sk-test-key-456"


class TestMiniMaxChatOpenAIParams:
"""Tests verifying the correct ChatOpenAI parameters for MiniMax."""

def test_minimax_base_url(self):
"""Test that MiniMax uses the correct API base URL."""
expected_base_url = "https://api.minimax.io/v1"
assert expected_base_url.startswith("https://")
assert "minimax" in expected_base_url

def test_minimax_temperature_zero(self):
"""Test that temperature=0 is used for MiniMax (deterministic output)."""
temperature = 0
assert temperature == 0

def test_minimax_model_names_valid(self):
"""Test that MiniMax model names match the API specification."""
valid_models = ["MiniMax-M2.7", "MiniMax-M2.7-highspeed"]
for model in valid_models:
assert model.startswith("MiniMax-")

def test_minimax_source_code_uses_correct_params(self):
"""Verify the llm.py source contains correct MiniMax configuration."""
llm_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"src", "llm.py"
)
with open(llm_path) as f:
content = f.read()

# Check that MiniMax branch exists and uses correct params
assert '"MINIMAX" in model' in content
assert 'base_url="https://api.minimax.io/v1"' in content
assert "temperature=0" in content

def test_minimax_uses_chatopenai_class(self):
"""Verify MiniMax uses ChatOpenAI (OpenAI-compatible API)."""
llm_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"src", "llm.py"
)
with open(llm_path) as f:
content = f.read()

# Find the MINIMAX block and verify it uses ChatOpenAI
minimax_start = content.find('"MINIMAX" in model')
assert minimax_start > 0
# Find the next elif/else after MINIMAX
next_elif = content.find("elif", minimax_start + 1)
minimax_block = content[minimax_start:next_elif]
assert "ChatOpenAI" in minimax_block

def test_minimax_env_value_split_count(self):
"""Test that MiniMax env value splits into exactly 2 parts."""
llm_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"src", "llm.py"
)
with open(llm_path) as f:
content = f.read()

minimax_start = content.find('"MINIMAX" in model')
next_elif = content.find("elif", minimax_start + 1)
minimax_block = content[minimax_start:next_elif]
# Should split into model_name, api_key (2 parts)
assert 'env_value.split(",")' in minimax_block


class TestMiniMaxExampleEnv:
"""Tests for MiniMax entries in example.env."""

def test_example_env_contains_minimax(self):
"""Test that backend/example.env contains MiniMax configurations."""
env_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"example.env"
)
with open(env_path) as f:
content = f.read()

assert "LLM_MODEL_CONFIG_minimax_m2.7=" in content
assert "LLM_MODEL_CONFIG_minimax_m2.7_highspeed=" in content

def test_example_env_minimax_format(self):
"""Test that MiniMax example.env entries follow the correct format."""
env_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"example.env"
)
with open(env_path) as f:
for line in f:
if "minimax_m2.7=" in line.lower() and not line.strip().startswith("#"):
# Extract value part
_, value = line.strip().split("=", 1)
value = value.strip('"')
parts = value.split(",")
assert len(parts) == 2, f"MiniMax config should have 2 parts: {line}"
model_name, api_key_placeholder = parts
assert model_name.startswith("MiniMax-")


class TestMiniMaxFrontendConfig:
"""Tests for MiniMax entries in frontend Constants.ts."""

def test_frontend_constants_contains_minimax(self):
"""Test that frontend Constants.ts includes MiniMax models."""
constants_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"..", "frontend", "src", "utils", "Constants.ts"
)
with open(constants_path) as f:
content = f.read()

assert "minimax_m2.7" in content
assert "minimax_m2.7_highspeed" in content


class TestMiniMaxReadme:
"""Tests for MiniMax entries in README."""

def test_readme_lists_minimax(self):
"""Test that README.md lists MiniMax as a supported LLM."""
readme_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
"..", "README.md"
)
with open(readme_path) as f:
content = f.read()

assert "MiniMax" in content
2 changes: 2 additions & 0 deletions frontend/src/utils/Constants.ts
@@ -28,6 +28,8 @@ export const llms =
'fireworks_qwen3_30b',
'fireworks_gpt_oss',
'fireworks_kimi_k2p5',
'minimax_m2.7',
'minimax_m2.7_highspeed',
];

export const prodllms =