Debugging Guide
This guide provides systematic approaches to debug issues with Klira AI SDK, including tools, techniques, and best practices for identifying and resolving problems.
Debugging Fundamentals
Enable Debug Logging
The first step in debugging any Klira AI SDK issue is to enable comprehensive logging:
import logging
import os

# Set up debug logging for all Klira AI components
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),                # Console output
        logging.FileHandler('klira_debug.log')  # File output
    ]
)

# Enable verbose mode in Klira AI SDK
from klira import Klira

Klira.init(
    app_name="DebugApp",
    api_key=os.getenv("KLIRA_API_KEY"),
    verbose=True,     # Enables detailed internal logging
    debug_mode=True   # Additional debug information
)

# Enable specific logger categories
loggers = [
    "klira",
    "klira.sdk",
    "klira.guardrails",
    "klira.adapters",
    "klira.utils.framework_detection",
    "klira.utils.framework_registry",
]
for logger_name in loggers:
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)

Debug Environment Setup
Create a debugging environment with enhanced diagnostics:
import os
import sys
import traceback
from typing import Any, Dict


def setup_debug_environment() -> Dict[str, Any]:
    """Set up comprehensive debugging environment.

    Collects interpreter, platform, Klira-version, environment-variable,
    and installed-package information useful when filing a bug report.

    Returns:
        Dict with keys: ``python_version``, ``platform``, ``klira_version``,
        ``environment_vars`` (KLIRA_* only, secrets masked with ``***``),
        and ``installed_packages`` (framework-related packages only).
    """
    debug_info = {
        "python_version": sys.version,
        "platform": sys.platform,
        "klira_version": None,
        "environment_vars": {},
        "installed_packages": [],
    }

    # Collect Klira AI version; a missing package is reported, not raised.
    try:
        from klira.version import __version__
        debug_info["klira_version"] = __version__
    except ImportError:
        debug_info["klira_version"] = "Unknown - check installation"

    # Collect relevant environment variables
    for key, value in os.environ.items():
        if key.startswith("KLIRA_"):
            # Mask sensitive values so the report is safe to share
            if "API_KEY" in key or "PASSWORD" in key:
                debug_info["environment_vars"][key] = "***"
            else:
                debug_info["environment_vars"][key] = value

    # Check installed packages (pkg_resources ships with setuptools and
    # may be absent on newer interpreters)
    try:
        import pkg_resources
        installed = [d.project_name for d in pkg_resources.working_set]
        relevant_packages = [p for p in installed if any(
            framework in p.lower()
            for framework in ["klira", "langchain", "crewai", "llama", "openai", "traceloop"]
        )]
        debug_info["installed_packages"] = relevant_packages
    except ImportError:
        debug_info["installed_packages"] = ["pkg_resources not available"]

    return debug_info
# Use the debug setup
debug_info = setup_debug_environment()
print("=== Debug Environment ===")
for key, value in debug_info.items():
    print(f"{key}: {value}")

Debugging Tools & Utilities
1. SDK Health Check
Create a comprehensive health check function:
from typing import Dict, Any

from klira import Klira
from klira.config import get_config
from klira.guardrails.engine import GuardrailsEngine
from klira.utils.framework_detection import detect_framework
from klira.utils.framework_registry import FrameworkRegistry, LLMClientRegistry


def comprehensive_health_check() -> Dict[str, Any]:
    """Perform comprehensive SDK health check.

    Validates configuration and SDK initialization, then derives an overall
    status. Never raises: failures are collected into the report.

    Returns:
        Dict with keys: ``overall_status`` (healthy/warning/error/critical_error),
        ``components``, ``configuration``, ``errors``, ``warnings``.
    """
    health_report = {
        "overall_status": "unknown",
        "components": {},
        "configuration": {},
        "errors": [],
        "warnings": []
    }

    try:
        # 1. Configuration Check
        print("Checking Configuration...")
        config = get_config()
        config_errors = config.validate()

        health_report["configuration"] = {
            "valid": len(config_errors) == 0,
            "errors": config_errors,
            "api_key_set": config.api_key is not None,
            "tracing_enabled": config.tracing_enabled,
            "policies_path": config.policies_path
        }

        if config_errors:
            health_report["errors"].extend(config_errors)

        # 2. SDK Initialization Check
        print("Checking SDK Initialization...")
        try:
            client = Klira.get()
            health_report["components"]["sdk_client"] = {
                "status": "initialized",
                "available": True
            }
        except Exception as e:
            health_report["components"]["sdk_client"] = {
                "status": "error",
                "available": False,
                "error": str(e)
            }
            health_report["errors"].append(f"SDK client error: {e}")

        # Overall status determination: errors outrank warnings
        if health_report["errors"]:
            health_report["overall_status"] = "error"
        elif health_report["warnings"]:
            health_report["overall_status"] = "warning"
        else:
            health_report["overall_status"] = "healthy"

    except Exception as e:
        # The check itself failed (e.g. SDK not installed at all)
        health_report["overall_status"] = "critical_error"
        health_report["errors"].append(f"Health check failed: {e}")
        print(f"Health check failed: {e}")

    return health_report
# Run health check
print("Running Comprehensive Health Check...")
health = comprehensive_health_check()

print("\nHealth Check Results:")
print(f"Overall Status: {health['overall_status'].upper()}")

if health["errors"]:
    print(f"\nErrors ({len(health['errors'])}):")
    for error in health["errors"]:
        print(f"  - {error}")
if health["warnings"]:
    print(f"\nWarnings ({len(health['warnings'])}):")
    for warning in health["warnings"]:
        print(f"  - {warning}")

2. Framework Detection Debugger
Debug framework detection issues:
from klira.utils.framework_detection import (
    detect_framework,
    detect_framework_cached,
    clear_detection_cache,
    FRAMEWORK_DETECTION_PATTERNS
)
import sys


def debug_framework_detection(target_object=None):
    """Debug framework detection for a specific object or general environment.

    Prints the registered detection patterns, the result of plain and cached
    detection, and a small timing comparison between the two code paths.

    Args:
        target_object: Reserved for object-specific detection; unused here.
    """
    print("Framework Detection Debug Report")
    print("=" * 50)

    # 1. Show available detection patterns
    print("\nAvailable Framework Patterns:")
    for framework, patterns in FRAMEWORK_DETECTION_PATTERNS.items():
        print(f"  {framework}:")
        print(f"    Modules: {patterns['modules']}")
        print(f"    Classes: {patterns['classes']}")
        print(f"    Import Names: {patterns['import_names']}")

    # 2. Test general detection
    print("\nGeneral Framework Detection:")
    general_detection = detect_framework()
    print(f"  Result: {general_detection}")

    # 3. Test cached detection
    print("\nCached Framework Detection:")
    cached_detection = detect_framework_cached()
    print(f"  Result: {cached_detection}")

    # 4. Performance test — perf_counter is monotonic and high-resolution,
    # unlike time.time(), which can return identical values back to back.
    print("\n[Fast] Performance Test:")
    import time

    # Test original detection
    start = time.perf_counter()
    for _ in range(100):
        detect_framework()
    original_time = time.perf_counter() - start

    # Test cached detection
    start = time.perf_counter()
    for _ in range(100):
        detect_framework_cached()
    cached_time = time.perf_counter() - start

    print(f"  Original detection (100 calls): {original_time*1000:.2f}ms")
    print(f"  Cached detection (100 calls): {cached_time*1000:.2f}ms")
    # Guard against division by zero when the cached path is too fast to measure
    speedup = original_time / cached_time if cached_time else float("inf")
    print(f"  Speedup: {speedup:.1f}x")
# Example usage:
debug_framework_detection()

3. Policy Debugger
Debug guardrails and policy evaluation:
from klira.guardrails.engine import GuardrailsEngine
from klira.guardrails.fast_rules import FastRulesEngine
from klira.config import get_policies_path


def debug_policy_evaluation(test_message: str, direction: str = "inbound"):
    """Debug policy evaluation step by step.

    Walks the same layers the SDK uses in production — policy loading,
    fast-rules evaluation, then the full guardrails engine — printing the
    intermediate result of each so a misbehaving layer can be isolated.

    Args:
        test_message: Message to evaluate against the policies.
        direction: Policy direction, "inbound" or "outbound".
    """
    print(f"Policy Evaluation Debug for: '{test_message[:50]}...'")
    print("=" * 60)

    try:
        # 1. Check policy loading
        print("\nStep 1: Policy Loading")
        policies_path = get_policies_path()
        print(f"Policies path: {policies_path}")

        fast_rules = FastRulesEngine(policies_path)
        print(f"Loaded {len(fast_rules.policies)} policies")

        # 2. Fast Rules Evaluation
        print("\nStep 2: Fast Rules Evaluation")
        fast_result = fast_rules.evaluate(test_message, direction=direction)
        print(f"  Result: {'BLOCKED' if fast_result.blocked else 'ALLOWED'}")
        print(f"  Confidence: {fast_result.confidence:.2f}")
        print(f"  Reason: {fast_result.reason}")

        # 3. Full Guardrails Engine Evaluation
        print("\nStep 3: Full Guardrails Evaluation")
        engine = GuardrailsEngine.get_instance()

        context = {
            "conversation_id": "debug_conversation",
            "user_id": "debug_user"
        }

        full_result = engine.evaluate(test_message, direction=direction, context=context)

        print(f"  Final Result: {'ALLOWED' if full_result.allowed else 'BLOCKED'}")
        print(f"  Confidence: {full_result.confidence:.2f}")
        print(f"  Decision Layer: {full_result.decision_layer}")

    except Exception as e:
        print(f"Debug process failed: {e}")
        import traceback
        traceback.print_exc()
# Example usage: one benign message, one with PII, one chain-of-thought style
test_messages = [
    "Hello, how can I help you?",
    "My SSN is 123-45-6789",
    "Let me think step by step about this problem..."
]
for message in test_messages:
    debug_policy_evaluation(message)
    print("\n" + "=" * 80)

Testing & Validation
Unit Test Template
Create unit tests for your Klira AI SDK integration:
import unittest
import os
from unittest.mock import patch

from klira import Klira
from klira.decorators import workflow, guardrails
from klira.config import get_config, reset_config


class TestKliraIntegration(unittest.TestCase):
    """Test suite for Klira AI SDK integration."""

    def setUp(self):
        """Set up test environment with a clean configuration."""
        reset_config()

    def tearDown(self):
        """Clean up after tests so state does not leak between cases."""
        reset_config()

    @patch.dict(os.environ, {"KLIRA_API_KEY": "klira_test_key"})
    def test_sdk_initialization(self):
        """Test basic SDK initialization."""
        client = Klira.init(
            app_name="TestApp",
            api_key="klira_test_key",
            enabled=False  # Don't actually send telemetry
        )

        # Verify initialization
        self.assertIsNotNone(client)

        # Verify configuration
        config = get_config()
        self.assertEqual(config.app_name, "TestApp")
        self.assertEqual(config.api_key, "klira_test_key")

    def test_decorator_application(self):
        """Test that decorators can be applied without errors."""

        @workflow(name="test_workflow")
        def test_function(x: int) -> int:
            return x * 2

        # Function should work normally
        result = test_function(5)
        self.assertEqual(result, 10)

    def test_framework_detection(self):
        """Test framework detection functionality."""
        from klira.utils.framework_detection import detect_framework

        # Should not crash
        framework = detect_framework()
        self.assertIsInstance(framework, str)
if __name__ == "__main__":
    unittest.main()

Debugging Checklist
Use this checklist when debugging Klira AI SDK issues:
Initial Setup
- Verify correct package installation (`pip show klira`)
- Check Python version compatibility (>=3.10)
- Confirm API key format (starts with `klira_`)
- Test basic SDK initialization
- Enable debug logging
Configuration Issues
- Validate environment variables (`KLIRA_*`)
- Check configuration file syntax (if using)
- Verify policies path exists and contains valid YAML
- Test configuration validation
- Check for conflicting settings
Framework Integration
- Verify import order (Klira before framework)
- Check framework detection results
- Confirm adapter registration
- Test decorator application
- Verify framework-specific dependencies
Guardrails Issues
- Check policy loading and parsing
- Test individual policy evaluation
- Verify direction-specific policies (inbound/outbound)
- Check confidence thresholds
- Test with known good/bad inputs
Performance Issues
- Profile function execution times
- Check cache hit rates
- Monitor memory usage
- Test with different configuration options
- Identify bottlenecks in call stack
This systematic approach to debugging will help you quickly identify and resolve issues with Klira AI SDK.