Advanced BCI Applications
This section demonstrates advanced applications of NimbusSDK for complex BCI scenarios. All examples use the Bayesian LDA (RxLDA) and Bayesian GMM (RxGMM) models with real Julia SDK code.
Cross-Subject Model Training
Train models that generalize across multiple subjects.
Multi-Subject Training Dataset
using NimbusSDK
NimbusSDK.install_core("your-api-key")
# Collect data from multiple subjects
subjects = ["S001", "S002", "S003", "S004", "S005"]
all_features = []
all_labels = []
for subject_id in subjects
println("Loading subject $subject_id...")
subj_features, subj_labels = load_subject_data(subject_id)
push!(all_features, subj_features)
push!(all_labels, subj_labels)
end
# Concatenate all subject data
combined_features = cat(all_features..., dims=3)
combined_labels = vcat(all_labels...)
println("\nCombined dataset:")
println(" Total trials: $(size(combined_features, 3))")
println(" Features: $(size(combined_features, 1))")
println(" Subjects: $(length(subjects))")
# Train cross-subject model
train_data = BCIData(
combined_features,
BCIMetadata(
sampling_rate = 250.0,
paradigm = :motor_imagery,
feature_type = :csp,
n_features = 16,
n_classes = 4
),
combined_labels
)
cross_subject_model = train_model(
RxLDAModel, # Bayesian LDA - or use RxGMMModel for Bayesian GMM with more flexibility
train_data;
iterations = 75, # More iterations for larger dataset
showprogress = true,
name = "cross_subject_5subj",
description = "Motor imagery trained on 5 subjects"
)
save_model(cross_subject_model, "cross_subject_model.jld2")
Transfer to New Subject
Use the cross-subject model as a baseline for new subjects:
using NimbusSDK
# Load cross-subject baseline
baseline = load_model(RxLDAModel, "cross_subject_model.jld2")
# Collect minimal calibration from new subject (Subject 006)
new_subj_calib = collect_calibration_trials(
subject_id="S006",
trials_per_class=12 # 12 per class × 4 classes = 48 total
)
# Calibrate for new subject
personalized = calibrate_model(
baseline,
new_subj_calib;
iterations = 20
)
# Test on new subject
test_data_new = load_subject_test_data("S006")
results = predict_batch(personalized, test_data_new)
accuracy = sum(results.predictions .== test_data_new.labels) / length(test_data_new.labels)
println("New subject accuracy: $(round(accuracy * 100, digits=1))%")
println("(With only 48 calibration trials total!)")
Hybrid BCI: Combining Multiple Paradigms
Combine motor imagery and P300 for robust control.
Dual-Paradigm System
using NimbusSDK
NimbusSDK.install_core("your-api-key")
# Load both models
mi_model = load_model(RxLDAModel, "motor_imagery_4class_v1") # Bayesian LDA
p300_model = load_model(RxLDAModel, "p300_binary_v1") # Bayesian LDA
struct HybridBCIResult
mi_prediction::Int
mi_confidence::Float64
p300_confirmed::Bool
p300_confidence::Float64
final_action::Union{Int, Nothing}
end
"""
Combined motor imagery + P300 decision for high-confidence control.
"""
function hybrid_inference(mi_trial, p300_trial)
# Motor imagery: Determine intended direction
mi_results = predict_batch(mi_model, mi_trial)
mi_pred = mi_results.predictions[1]
mi_conf = mi_results.confidences[1]
# P300: Confirm intention
p300_results = predict_batch(p300_model, p300_trial)
p300_confirmed = p300_results.predictions[1] == 1 # Target detected
p300_conf = p300_results.confidences[1]
# Decision logic: Require both high confidence
final_action = nothing
if mi_conf > 0.75 && p300_confirmed && p300_conf > 0.75
final_action = mi_pred
end
return HybridBCIResult(
mi_pred,
mi_conf,
p300_confirmed,
p300_conf,
final_action
)
end
# Example usage: Wheelchair control
function hybrid_wheelchair_control()
while is_active()
# Collect motor imagery trial (direction intent)
mi_trial = collect_mi_trial()
# Collect P300 confirmation trial
p300_trial = collect_p300_confirmation()
result = hybrid_inference(mi_trial, p300_trial)
if !isnothing(result.final_action)
println("✓ Confirmed action: $(result.final_action)")
execute_wheelchair_command(result.final_action)
else
println("⚠️ Low confidence - no action")
println(" MI: $(round(result.mi_confidence, digits=2))")
println(" P300: $(result.p300_confirmed) ($(round(result.p300_confidence, digits=2)))")
end
end
end
Continuous Control with Smoothing
Implement smooth continuous control for cursors and prosthetics.
Confidence-Weighted Moving Average
using NimbusSDK
# Setup streaming
session = init_streaming(model, metadata)
# Smoothing parameters
struct MovingAverageFilter
predictions::Vector{Int}
confidences::Vector{Float64}
window_size::Int # Number of recent chunks to keep
end
function smooth_prediction(filter::MovingAverageFilter, new_pred::Int, new_conf::Float64)
push!(filter.predictions, new_pred)
push!(filter.confidences, new_conf)
# Keep only recent window
if length(filter.predictions) > filter.window_size
popfirst!(filter.predictions)
popfirst!(filter.confidences)
end
# Weighted voting by confidence
class_scores = zeros(4)
for (pred, conf) in zip(filter.predictions, filter.confidences)
class_scores[pred] += conf
end
# Smoothed prediction
smoothed_pred = argmax(class_scores)
smoothed_conf = maximum(class_scores) / sum(class_scores)
return (prediction=smoothed_pred, confidence=smoothed_conf)
end
# Real-time control with smoothing
filter = MovingAverageFilter(Int[], Float64[], 5) # window of 5 chunks
for chunk in eeg_stream
# Get raw prediction
raw_result = process_chunk(session, chunk)
# Apply smoothing
smoothed = smooth_prediction(filter, raw_result.prediction, raw_result.confidence)
# Execute smoothed command
if smoothed.confidence > 0.7
execute_smooth_control(smoothed.prediction, smoothed.confidence)
end
end
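The filter above is a confidence-weighted sliding window. An alternative is an exponential moving average, where recent chunks dominate without keeping an explicit buffer. The sketch below is illustrative only (EMASmoother and smooth_ema! are not SDK functions) and relies solely on the prediction and confidence fields already used above, assuming 4 classes.
# Illustrative EMA smoother (not part of NimbusSDK)
mutable struct EMASmoother
scores::Vector{Float64} # Running per-class evidence
alpha::Float64 # Smoothing factor in (0, 1); higher reacts faster
end
EMASmoother(n_classes::Int, alpha::Float64) = EMASmoother(zeros(n_classes), alpha)
function smooth_ema!(s::EMASmoother, pred::Int, conf::Float64)
# One-hot evidence for the predicted class, scaled by its confidence
evidence = zeros(length(s.scores))
evidence[pred] = conf
# Exponential moving average update
s.scores .= s.alpha .* evidence .+ (1 - s.alpha) .* s.scores
smoothed_pred = argmax(s.scores)
smoothed_conf = maximum(s.scores) / max(sum(s.scores), eps())
return (prediction=smoothed_pred, confidence=smoothed_conf)
end
# Usage inside the streaming loop:
# smoother = EMASmoother(4, 0.3)
# smoothed = smooth_ema!(smoother, raw_result.prediction, raw_result.confidence)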
Velocity-Based Control
Control velocity instead of issuing discrete actions:
using NimbusSDK
function velocity_control(model, eeg_stream, metadata)
session = init_streaming(model, metadata)
velocity = [0.0, 0.0] # [vx, vy]
max_velocity = 1.0
acceleration = 0.1
friction = 0.95
for chunk in eeg_stream
result = process_chunk(session, chunk)
if result.confidence > 0.7
# Map predictions to velocity changes
if result.prediction == 1 # Left
velocity[1] -= acceleration * result.confidence
elseif result.prediction == 2 # Right
velocity[1] += acceleration * result.confidence
elseif result.prediction == 3 # Up
velocity[2] += acceleration * result.confidence
elseif result.prediction == 4 # Down
velocity[2] -= acceleration * result.confidence
end
end
# Apply friction
velocity .*= friction
# Clamp velocity
velocity = clamp.(velocity, -max_velocity, max_velocity)
# Update position
update_cursor_velocity(velocity)
end
end
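With friction = 0.95, the cursor coasts rather than stopping abruptly: whenever confidence stays below the 0.7 threshold, velocity decays exponentially and falls to roughly half its value after about 13 low-confidence chunks (0.95^13 ≈ 0.51).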
Adaptive Learning During Use
Update model parameters online based on implicit feedback.
Online Model Adaptation
using NimbusSDK
# Start with baseline Bayesian LDA model
model = load_model(RxLDAModel, "motor_imagery_4class_v1")
# Collect online data with implicit labels
online_features = []
online_labels = Int[]
adaptation_interval = 20 # Adapt every 20 trials
for trial_idx in 1:200
# Collect trial
trial_features = collect_trial()
# Get prediction
trial_data = BCIData(trial_features, metadata)
result = predict_batch(model, trial_data)
# Implicit labeling: Assume high-confidence predictions are correct
if result.confidences[1] > 0.85
push!(online_features, trial_features)
push!(online_labels, result.predictions[1])
println("Trial $trial_idx: Confident prediction added to adaptation set")
end
# Periodic adaptation
if trial_idx % adaptation_interval == 0 && length(online_labels) >= 10
println("\n=== Adapting model at trial $trial_idx ===")
# Create adaptation dataset
adapt_features = cat(online_features..., dims=3)
adapt_data = BCIData(adapt_features, metadata, online_labels)
# Calibrate with online data
model = calibrate_model(model, adapt_data; iterations=15)
println("Model adapted with $(length(online_labels)) trials")
# Keep a sliding window of the most recent trials
keep = max(1, length(online_labels) - 9)
online_features = online_features[keep:end]
online_labels = online_labels[keep:end]
end
end
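Note that implicit labeling can reinforce the model's own mistakes, since only its high-confidence predictions enter the adaptation set. It is worth interleaving occasional cued trials with known labels and checking accuracy on them before trusting an adapted model.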
Multi-Session Experiment Design
Run systematic experiments across multiple sessions.
Experiment Framework
using NimbusSDK
using Dates
using Statistics # mean and std for the experiment summary
struct ExperimentSession
session_id::String
subject_id::String
date::DateTime
model_type::Symbol # :rxlda or :rxgmm
n_trials::Int
results::Union{Nothing, BatchResult}
accuracy::Union{Nothing, Float64}
itr::Union{Nothing, Float64}
end
function run_experiment_session(
subject_id::String,
session_id::String,
model_type::Symbol,
n_trials::Int
)
println("\n" * "="^60)
println("Experiment Session: $session_id")
println("Subject: $subject_id")
println("Model: $model_type")
println("Date: $(Dates.now())")
println("="^60)
# Authenticate
NimbusSDK.install_core("your-api-key")
# Load appropriate model
model = if model_type == :rxlda
load_model(RxLDAModel, "motor_imagery_4class_v1") # Bayesian LDA
else
load_model(RxGMMModel, "motor_imagery_4class_gmm_v1") # Bayesian GMM
end
# Collect data
println("\nCollecting $n_trials trials...")
features, labels = collect_experiment_trials(n_trials)
data = BCIData(
features,
BCIMetadata(
sampling_rate = 250.0,
paradigm = :motor_imagery,
feature_type = :csp,
n_features = 16,
n_classes = 4
),
labels
)
# Run inference
println("Running inference...")
results = predict_batch(model, data)
# Calculate metrics
accuracy = sum(results.predictions .== labels) / length(labels)
itr = calculate_ITR(accuracy, 4, 4.0)
println("\n" * "="^60)
println("Session Results")
println("="^60)
println("Accuracy: $(round(accuracy * 100, digits=1))%")
println("ITR: $(round(itr, digits=1)) bits/minute")
println("Mean confidence: $(round(mean(results.confidences), digits=3))")
println("="^60)
# Return session data
return ExperimentSession(
session_id,
subject_id,
Dates.now(),
model_type,
n_trials,
results,
accuracy,
itr
)
end
# Run multi-session experiment
function run_multi_session_experiment()
subjects = ["S001", "S002", "S003"]
sessions = []
for subject in subjects
# Session 1: RxLDA baseline
session1 = run_experiment_session(
subject,
"$(subject)_session1_rxlda",
:rxlda, # Bayesian LDA
80
)
push!(sessions, session1)
# Session 2: RxGMM comparison
session2 = run_experiment_session(
subject,
"$(subject)_session2_rxgmm",
:rxgmm, # Bayesian GMM
80
)
push!(sessions, session2)
end
# Aggregate results
println("\n\n" * "="^60)
println("Experiment Summary")
println("="^60)
for session in sessions
println("\n$(session.session_id):")
println(" Accuracy: $(round(session.accuracy * 100, digits=1))%")
println(" ITR: $(round(session.itr, digits=1)) bits/min")
end
# Statistical analysis
rxlda_accs = [s.accuracy for s in sessions if s.model_type == :rxlda]
rxgmm_accs = [s.accuracy for s in sessions if s.model_type == :rxgmm]
println("\n" * "="^60)
println("Model Comparison")
println("="^60)
println("Bayesian LDA mean accuracy: $(round(mean(rxlda_accs) * 100, digits=1))% ± $(round(std(rxlda_accs) * 100, digits=1))%")
println("Bayesian GMM mean accuracy: $(round(mean(rxgmm_accs) * 100, digits=1))% ± $(round(std(rxgmm_accs) * 100, digits=1))%")
return sessions
end
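For reference, a minimal sketch of the standard Wolpaw ITR formula is shown below, under the assumption that calculate_ITR(accuracy, 4, 4.0) takes (accuracy, number of classes, trial duration in seconds) and reports bits per minute; wolpaw_itr is an illustrative helper, not the SDK implementation.
# Standard Wolpaw ITR: bits per trial, scaled to bits per minute.
# Illustrative only; use calculate_ITR from the SDK in practice.
function wolpaw_itr(accuracy::Float64, n_classes::Int, trial_duration_s::Float64)
N = n_classes
P = clamp(accuracy, eps(), 1 - eps()) # Avoid log2(0) at exactly 0% or 100% accuracy
bits_per_trial = log2(N) + P * log2(P) + (1 - P) * log2((1 - P) / (N - 1))
return bits_per_trial * (60.0 / trial_duration_s) # bits per minute
end
# Example: 80% accuracy, 4 classes, 4-second trials
# wolpaw_itr(0.8, 4, 4.0) ≈ 14.4 bits/minute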
Error Recovery and Robustness
Handle errors and maintain reliable operation.
Automatic Retry with Fallback
using NimbusSDK
"""
Inference with automatic retry and fallback.
"""
function robust_inference(model, data; max_retries=3)
for attempt in 1:max_retries
try
results = predict_batch(model, data)
# Quality check
quality = assess_trial_quality(results)
if quality.confidence_acceptable
return (success=true, results=results, attempts=attempt)
else
@warn "Low quality results" quality.overall_score attempt
if attempt < max_retries
println("Retrying...")
continue
end
end
catch e
@error "Inference failed" exception=e attempt
if attempt < max_retries
println("Retrying after error...")
sleep(0.1) # Brief pause before retry
continue
else
# All retries exhausted
return (success=false, results=nothing, attempts=attempt)
end
end
end
# If we get here, all retries failed
return (success=false, results=nothing, attempts=max_retries)
end
# Usage
result = robust_inference(model, data)
if result.success
println("Inference successful after $(result.attempts) attempt(s)")
process_results(result.results)
else
println("⚠️ Inference failed after $(result.attempts) attempts")
handle_failure()
end
Session Health Monitoring
using NimbusSDK
using Statistics # mean over recent accuracies
mutable struct SessionHealth
trial_count::Int
success_count::Int
failure_count::Int
mean_confidence::Float64
recent_accuracies::Vector{Float64}
end
function monitor_session_health(
model,
eeg_stream,
metadata;
health_window=10
)
health = SessionHealth(0, 0, 0, 0.0, Float64[])
for trial in eeg_stream
health.trial_count += 1
try
# Run inference
data = BCIData(trial.features, metadata, trial.label)
results = predict_batch(model, data)
# Update health metrics
health.success_count += 1
health.mean_confidence = (
(health.mean_confidence * (health.success_count - 1) +
results.confidences[1]) / health.success_count
)
# Track recent accuracy
if !isnothing(trial.label)
correct = results.predictions[1] == trial.label
push!(health.recent_accuracies, Float64(correct))
if length(health.recent_accuracies) > health_window
popfirst!(health.recent_accuracies)
end
end
# Health checks
if health.mean_confidence < 0.6
@warn "Mean confidence dropping" health.mean_confidence
end
if length(health.recent_accuracies) >= health_window
recent_acc = mean(health.recent_accuracies)
if recent_acc < 0.65
@warn "Recent accuracy low - consider recalibration" recent_acc
end
end
catch e
health.failure_count += 1
@error "Trial failed" exception=e trial_num=health.trial_count
if health.failure_count / health.trial_count > 0.1
error("Session failure rate too high - aborting")
end
end
# Periodic health report
if health.trial_count % 20 == 0
println("\n=== Session Health Report ===")
println("Trials: $(health.trial_count)")
println("Success rate: $(round(100 * health.success_count / health.trial_count, digits=1))%")
println("Mean confidence: $(round(health.mean_confidence, digits=3))")
if !isempty(health.recent_accuracies)
println("Recent accuracy: $(round(mean(health.recent_accuracies) * 100, digits=1))%")
end
println("="^30)
end
end
return health
end
Next Steps
Basic Examples: Fundamental BCI examples
Code Samples: Complete working code
Industry Use Cases: Real-world applications
Julia SDK: Complete SDK reference
All examples use actual NimbusSDK.jl functions and real Bayesian LDA (RxLDA) and Bayesian GMM (RxGMM) models. These are production-ready patterns used in real BCI applications.