Robust error handling is critical for BCI applications, especially in medical and assistive technology contexts where system failures can have serious consequences. NimbusSDK provides comprehensive validation and error detection to ensure reliable operation.
Common installation errors and how they surface:

```julia
using NimbusSDK

# 1. Invalid API key format
try
    NimbusSDK.install_core("short_key")  # Too short
catch e
    # Error: "Invalid API key format. API keys must be at least 20 characters."
    println("Installation failed: Please check your API key")
end

# 2. Core already installed
# The core is installed once per machine, so check before installing:
if NimbusSDK.is_core_installed()
    println("Core already installed - no need to reinstall")
else
    NimbusSDK.install_core("your-api-key")
end

# 3. Network issues during installation
try
    NimbusSDK.install_core("your-api-key")
catch e
    # Error: "Cannot connect to Nimbus API. Please check your internet connection."
    println("Please check your internet connection and try again")
end
```
Best Practice: Handle Installation Errors Gracefully
```julia
function safe_install_core(api_key::String; max_retries::Int=3)
    # Check if already installed
    if NimbusSDK.is_core_installed()
        println("✓ Core already installed")
        return true
    end

    for attempt in 1:max_retries
        try
            NimbusSDK.install_core(api_key)
            println("✓ Core installed successfully")
            return true
        catch e
            @warn "Installation attempt $attempt failed" exception=e
            if attempt < max_retries
                sleep_time = 2^attempt  # Exponential backoff
                println("Retrying in $sleep_time seconds...")
                sleep(sleep_time)
            else
                @error "Installation failed after $max_retries attempts"
                return false
            end
        end
    end
    return false
end

# Usage
if !safe_install_core("your-api-key")
    println("❌ Cannot proceed without core installation")
    exit(1)
end
```
NimbusSDK validates data before inference to catch common issues:
```julia
using NimbusSDK

# 1. NaN or Inf values
bad_features = randn(16, 250, 10)
bad_features[5, 100, 3] = NaN  # Inject NaN

try
    data = BCIData(bad_features, metadata)
    # Error: "Data contains NaN values. Please clean your data before inference."
catch e
    println("Data validation failed: Clean your data")
    # Fix: Remove or interpolate NaN values
    # (replace_nan_with_mean is your own helper - see the sketch below)
    clean_features = replace_nan_with_mean(bad_features)
end

# 2. Dimension mismatches
wrong_dim_features = randn(16, 250, 10, 2)  # 4D instead of 3D
try
    data = BCIData(wrong_dim_features, metadata)
    # Error: "Features must be 2D or 3D array, got 4D"
catch e
    println("Dimension error: Expected 2D or 3D array")
end

# 3. Feature dimension mismatch
metadata = BCIMetadata(
    sampling_rate = 250.0,
    n_features = 16,  # Expects 16 features
    n_classes = 4
)
features = randn(8, 250, 10)  # Only 8 features!
try
    data = BCIData(features, metadata)
    model = load_model(RxLDAModel, "motor_imagery_4class_v1")
    results = predict_batch(model, data)
    # Error: "Data has 8 features but model expects 16 features"
catch e
    println("Feature dimension mismatch: Check your preprocessing")
end
```
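The `replace_nan_with_mean` call above refers to a helper you supply yourself; it is not part of NimbusSDK. A minimal sketch, assuming the first array dimension indexes channels:

```julia
using Statistics

# Hypothetical helper: replace each NaN with the mean of the
# non-NaN values in the same channel.
function replace_nan_with_mean(features::AbstractArray)
    cleaned = copy(features)
    for ch in axes(cleaned, 1)
        # View of everything belonging to this channel
        channel = view(cleaned, ch, ntuple(_ -> Colon(), ndims(cleaned) - 1)...)
        mask = isnan.(channel)
        if any(mask)
            channel_mean = mean(channel[.!mask])
            channel[mask] .= channel_mean
        end
    end
    return cleaned
end
```

For real recordings, interpolation across neighboring samples is usually preferable to mean substitution; the sketch only shows the simplest safe default.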
Best Practice: Pre-validation Function
```julia
using Statistics  # for std

function validate_bci_data(features, metadata)
    errors = String[]
    warnings = String[]

    # Check for NaN/Inf
    if any(isnan.(features))
        push!(errors, "Data contains NaN values")
    end
    if any(isinf.(features))
        push!(errors, "Data contains Inf values")
    end

    # Check dimensions
    if ndims(features) < 2 || ndims(features) > 3
        push!(errors, "Features must be 2D or 3D, got $(ndims(features))D")
    end

    # Check feature dimension
    actual_features = size(features, 1)
    if actual_features != metadata.n_features
        push!(errors, "Feature count mismatch: $actual_features vs $(metadata.n_features)")
    end

    # Check for suspicious statistics
    feature_std = std(features)
    if feature_std < 1e-10
        push!(warnings, "Very small std dev ($feature_std) - data may not be scaled")
    end
    if feature_std > 1e6
        push!(warnings, "Very large std dev ($feature_std) - consider normalization")
    end

    # Report issues
    if !isempty(errors)
        println("❌ Validation Errors:")
        for err in errors
            println("  • $err")
        end
        return false
    end
    if !isempty(warnings)
        println("⚠️ Validation Warnings:")
        for warn in warnings
            println("  • $warn")
        end
    end
    return true
end

# Usage
if validate_bci_data(features, metadata)
    data = BCIData(features, metadata)
    results = predict_batch(model, data)
else
    println("Fix data issues before inference")
end
```
Using a model with incompatible data raises a clear error:

```julia
using NimbusSDK

# Load a 4-class motor imagery model
model = load_model(RxLDAModel, "motor_imagery_4class_v1")

# But provide 2-class data
metadata = BCIMetadata(
    sampling_rate = 250.0,
    n_features = 16,
    n_classes = 2  # Wrong!
)
features = randn(16, 250, 10)
data = BCIData(features, metadata)

try
    results = predict_batch(model, data)
    # Error: "Data has 2 classes but model expects 4 classes"
catch e
    println("Class count mismatch: Use the correct model for your data")
end
```
Best Practice: Check Compatibility Before Inference
```julia
function check_compatibility(data::BCIData, model)
    # Get model requirements
    model_features = get_n_features(model)
    model_classes = get_n_classes(model)

    # Compare
    compatible = true
    if data.metadata.n_features != model_features
        @error "Feature mismatch" data=data.metadata.n_features model=model_features
        compatible = false
    end
    if data.metadata.n_classes != model_classes
        @error "Class count mismatch" data=data.metadata.n_classes model=model_classes
        compatible = false
    end
    return compatible
end

# Usage
if check_compatibility(data, model)
    results = predict_batch(model, data)
else
    println("Model and data are incompatible")
end
```
Streaming inference validates each incoming chunk:

```julia
using NimbusSDK

# 1. Chunk size mismatch
metadata = BCIMetadata(
    chunk_size = 250,  # Expects 250 samples per chunk
    # ... other params
)
session = init_streaming(model, metadata)

# But provide a wrong-sized chunk
wrong_chunk = randn(16, 125)  # Only 125 samples!
try
    result = process_chunk(session, wrong_chunk)
    # Warning: "Chunk size mismatch" expected=250 actual=125
catch e
    println("Chunk size mismatch")
end

# 2. Feature dimension mismatch
wrong_features_chunk = randn(8, 250)  # Only 8 features instead of 16!
try
    result = process_chunk(session, wrong_features_chunk)
    # Error: "Chunk feature dimension (8) does not match metadata (16)"
catch e
    println("Feature dimension mismatch in chunk")
end
```
Best Practice: Validate Chunks Before Processing
```julia
function safe_process_chunk(session, chunk, metadata)
    # Validate chunk dimensions
    n_features, n_samples = size(chunk)
    if n_features != metadata.n_features
        @error "Feature dimension mismatch" expected=metadata.n_features actual=n_features
        return nothing
    end
    if !isnothing(metadata.chunk_size) && n_samples != metadata.chunk_size
        @warn "Chunk size mismatch" expected=metadata.chunk_size actual=n_samples
    end

    # Check for invalid values
    if any(isnan.(chunk)) || any(isinf.(chunk))
        @error "Chunk contains NaN or Inf values"
        return nothing
    end

    # Process chunk
    try
        result = process_chunk(session, chunk)
        return result
    catch e
        @error "Chunk processing failed" exception=e
        return nothing
    end
end

# Usage in a streaming loop
for raw_chunk in eeg_stream
    result = safe_process_chunk(session, raw_chunk, metadata)
    if !isnothing(result)
        handle_prediction(result)
    else
        # Handle failed chunk
        @warn "Skipping invalid chunk"
    end
end
```
Use built-in diagnostics to identify data quality issues:
```julia
using NimbusSDK

# Collect a diagnostic trial
features = collect_your_eeg_trial()  # Your acquisition code
metadata = BCIMetadata(...)          # Your recording parameters
data = BCIData(features, metadata)

# Run diagnostics
report = diagnose_preprocessing(data)

# Check quality
println("Quality Score: $(round(report.quality_score * 100, digits=1))%")

if report.quality_score < 0.7
    println("\n⚠️ Data quality issues detected:")

    # Show warnings
    if !isempty(report.warnings)
        for warning in report.warnings
            println("  • $warning")
        end
    end

    # Show recommendations
    if !isempty(report.recommendations)
        println("\nRecommendations:")
        for rec in report.recommendations
            println("  • $rec")
        end
    end

    # Decide whether to proceed
    if report.quality_score < 0.5
        println("\n❌ Quality too low - cannot proceed")
        exit(1)
    else
        println("\n⚠️ Proceeding with caution")
    end
else
    println("✓ Data quality is acceptable")
end
```
Adjust logging verbosity to match your environment:

```julia
using Logging

# Development: verbose logging
dev_logger = ConsoleLogger(stdout, Logging.Debug)

# Production: errors and warnings only
prod_logger = ConsoleLogger(stdout, Logging.Warn)

# Custom: log to a file
function create_file_logger(path::String)
    io = open(path, "w")
    # Remember to close(io) when you are done with the logger
    return ConsoleLogger(io, Logging.Info)
end

# Set the global logger
global_logger(prod_logger)

# Or use a logger for specific blocks
with_logger(dev_logger) do
    # Detailed logging for this section
    results = predict_batch(model, data)
end
```
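If you also need persistent logs, the third-party LoggingExtras.jl package (an extra dependency, not part of NimbusSDK) can tee output to the console and a file. A minimal sketch:

```julia
using Logging, LoggingExtras

# Warnings and errors go to the console; everything at Info and
# above is also written to nimbus.log.
combined_logger = TeeLogger(
    ConsoleLogger(stdout, Logging.Warn),
    MinLevelLogger(FileLogger("nimbus.log"), Logging.Info),
)

global_logger(combined_logger)
```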
Error Handling Checklist
✅ Always validate data before inference
✅ Check compatibility between model and data
✅ Handle authentication errors gracefully with offline fallback
✅ Pre-allocate buffers in real-time loops to avoid allocation overhead (see the sketch after this list)
✅ Use try-catch around critical operations
✅ Log warnings but continue execution when safe
✅ Implement retry logic for transient failures
✅ Run preprocessing diagnostics when confidence is low
✅ Monitor error rates in production systems
✅ Test error paths during development
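As an illustration of the buffer pre-allocation point above, here is a minimal sketch; `is_streaming` and `read_samples!` are hypothetical stand-ins for your own session-state query and in-place acquisition call, and `safe_process_chunk` is the helper defined earlier:

```julia
# Assumed channel count and chunk size for this sketch
const N_FEATURES = 16
const CHUNK_SIZE = 250

# Allocate the chunk buffer once, outside the hot loop
chunk_buffer = Matrix{Float64}(undef, N_FEATURES, CHUNK_SIZE)

while is_streaming(session)                  # hypothetical session-state query
    read_samples!(chunk_buffer, eeg_stream)  # hypothetical in-place acquisition
    result = safe_process_chunk(session, chunk_buffer, metadata)
    if !isnothing(result)
        handle_prediction(result)
    end
end
```

Because the buffer is reused, the loop body performs no per-chunk allocations, which keeps garbage-collection pauses out of the real-time path.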