The mistral_api.py module provides comprehensive integration with Mistral AI's extensive API suite, enabling mindX agents to leverage advanced AI capabilities including reasoning, code generation, embeddings, classification, and more.
mistral_api.py
├── MistralAPIClient # Low-level API client
├── MistralIntegration # High-level mindX integration
├── MistralConfig # Configuration management
├── Data Classes # Request/response structures
└── Utility Functions # Helper functions
The low-level client providing access to all Mistral AI API endpoints.
Key Features:
Usage:
async with MistralAPIClient(config) as client:
response = await client.chat_completion(request)
High-level integration class designed specifically for mindX agents.
Key Features:
Usage:
async with MistralIntegration(config) as mistral:
reasoning = await mistral.enhance_reasoning(context, question)
Configuration management for API settings.
Parameters:
api_key: Mistral API key
base_url: API base URL (default: https://api.mistral.ai/v1)
timeout: Request timeout in seconds (default: 30)
max_retries: Maximum retry attempts (default: 3)
rate_limit_delay: Delay between requests (default: 0.1s)
Supported endpoints: /v1/chat/completions, /v1/fim/completions, /v1/agents/, /v1/embeddings, /v1/moderations, /v1/classifications, /v1/files/, /v1/fine_tuning/, /v1/models/, /v1/batch/, /v1/ocr, /v1/audio/transcriptions, /v1/conversations/, /v1/libraries/*
reasoning = await mistral.enhance_reasoning(
context="mindX autonomous system context",
question="How to optimize agent coordination?",
model=MistralModel.MISTRAL_LARGE_LATEST
)
Use Cases:
code = await mistral.generate_code(
prompt="def optimize_agent_performance(",
suffix="return optimized_result",
model=MistralModel.CODESTRAL_LATEST
)
Use Cases:
embeddings = await mistral.create_embeddings_for_memory([
"Agent coordination principles",
"Memory management strategies"
])
Use Cases:
classification = await mistral.classify_agent_intent(
"I need help with code generation"
)
Use Cases:
moderation = await mistral.moderate_agent_output(
"Agent response content"
)
Use Cases:
result = await mistral.process_document(
file_path="/path/to/document.pdf",
library_id="knowledge-base-123"
)
Use Cases:
transcript = await mistral.transcribe_audio_for_agent(
file_path="/path/to/audio.wav",
language="en"
)
Use Cases:
from api.mistral_api import MistralIntegration, create_mistral_config
# Create configuration
config = create_mistral_config(api_key="your-api-key-here")
# Use high-level integration
async with MistralIntegration(config) as mistral:
# Your operations here
pass
async def enhance_agent_reasoning(agent_context, user_query):
async with MistralIntegration(config) as mistral:
reasoning = await mistral.enhance_reasoning(
context=agent_context,
question=user_query,
model=MistralModel.MISTRAL_LARGE_LATEST
)
return reasoning
async def generate_agent_tool(tool_description):
async with MistralIntegration(config) as mistral:
code = await mistral.generate_code(
prompt=f"def {tool_description}(",
suffix="return result",
model=MistralModel.CODESTRAL_LATEST
)
return code
async def store_knowledge_in_memory(knowledge_items):
async with MistralIntegration(config) as mistral:
embeddings = await mistral.create_embeddings_for_memory(knowledge_items)
# Store in mindX memory system
for item, embedding in zip(knowledge_items, embeddings):
await memory_agent.store_with_embedding(item, embedding)
async def process_document_pipeline(file_path):
async with MistralIntegration(config) as mistral:
# Process document
result = await mistral.process_document(file_path)
# Extract insights
insights = await mistral.enhance_reasoning(
context=result["ocr_result"]["document_annotation"],
question="What are the key insights from this document?"
)
return {
"file_info": result["file_info"],
"extracted_text": result["ocr_result"],
"insights": insights
}
# Required
MISTRAL_API_KEY=your-mistral-api-key
# Optional
MISTRAL_BASE_URL=https://api.mistral.ai/v1
MISTRAL_TIMEOUT=30
MISTRAL_MAX_RETRIES=3
MISTRAL_RATE_LIMIT_DELAY=0.1
from api.mistral_api import MistralConfig
config = MistralConfig(
api_key="your-api-key",
base_url="https://api.mistral.ai/v1",
timeout=30,
max_retries=3,
rate_limit_delay=0.1
)
MistralAPIError: Custom exception for API-related errors
aiohttp.ClientError: Network and connection errors
json.JSONDecodeError: Response parsing errors
try:
response = await client.chat_completion(request)
except MistralAPIError as e:
logger.error(f"Mistral API error: {e}")
# Handle API-specific errors
except aiohttp.ClientError as e:
logger.error(f"Network error: {e}")
# Handle network errors
except Exception as e:
logger.error(f"Unexpected error: {e}")
# Handle unexpected errors
The client automatically retries failed requests with exponential backoff:
Consider implementing caching for:
class EnhancedBDIAgent(BDIAgent):
def __init__(self, mistral_config):
super().__init__()
self.mistral_config = mistral_config
async def enhanced_reasoning(self, context, goal):
async with MistralIntegration(self.mistral_config) as mistral:
return await mistral.enhance_reasoning(context, goal)
class EnhancedMemoryAgent(MemoryAgent):
def __init__(self, mistral_config):
super().__init__()
self.mistral_config = mistral_config
async def store_with_embeddings(self, content):
async with MistralIntegration(self.mistral_config) as mistral:
embeddings = await mistral.create_embeddings_for_memory([content])
return await self.store(content, embeddings[0])
class EnhancedCoordinatorAgent(CoordinatorAgent):
def __init__(self, mistral_config):
super().__init__()
self.mistral_config = mistral_config
async def classify_and_route(self, message):
async with MistralIntegration(self.mistral_config) as mistral:
classification = await mistral.classify_agent_intent(message)
return await self.route_to_agent(message, classification)
from api.mistral_api import test_mistral_connection
async def test_connection():
config = create_mistral_config(api_key="test-key")
success = await test_mistral_connection(config)
assert success, "Failed to connect to Mistral API"
import pytest
from api.mistral_api import MistralIntegration, MistralConfig
@pytest.mark.asyncio
async def test_enhance_reasoning():
config = MistralConfig(api_key="test-key")
async with MistralIntegration(config) as mistral:
result = await mistral.enhance_reasoning("test context", "test question")
assert isinstance(result, str)
assert len(result) > 0
import logging
# Configure logging for Mistral API
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('mistral_api')
# Log API calls
logger.info(f"Making API call to {endpoint}")
logger.debug(f"Request payload: {payload}")
logger.info(f"Response received: {response}")
import time
async def monitored_api_call():
start_time = time.time()
try:
result = await client.chat_completion(request)
duration = time.time() - start_time
logger.info(f"API call completed in {duration:.2f}s")
return result
except Exception as e:
duration = time.time() - start_time
logger.error(f"API call failed after {duration:.2f}s: {e}")
raise
Rate limit errors: increase the rate_limit_delay setting
- Reduce concurrent requests
- Implement exponential backoff
Timeout errors: increase the timeout value
- Check network connectivity
- Verify API endpoint availability
import logging
# Enable debug logging
logging.getLogger('mistral_api').setLevel(logging.DEBUG)
# Enable request/response logging
config = MistralConfig(
api_key="your-key",
debug=True # Enable debug mode
)
The Mistral AI API integration provides mindX with comprehensive access to advanced AI capabilities while maintaining the modular, non-invasive architecture required for the hackathon. The integration supports all major Mistral AI services and provides mindX-specific helper methods for seamless agent enhancement.
For more information, refer to: