from nimbus_bci import NimbusLDA
import numpy as np

# Generate sample data (in practice, use your preprocessed EEG features)
np.random.seed(42)
n_samples, n_features = 100, 16
X_train = np.random.randn(n_samples, n_features)
y_train = np.random.randint(0, 4, n_samples)  # 4-class problem

# Create and fit classifier
clf = NimbusLDA()
clf.fit(X_train, y_train)

# Predict on new data
X_test = np.random.randn(20, n_features)
predictions = clf.predict(X_test)
probabilities = clf.predict_proba(X_test)

print(f"Predictions: {predictions}")
print(f"Probabilities shape: {probabilities.shape}")  # (20, 4)
Key concepts: NimbusLDA() creates a Bayesian LDA classifier, fit() trains the model on labeled data, predict() returns class predictions, and predict_proba() returns full posterior distributions.
Common hyperparameters: mu_scale controls the prior precision on class means (higher = more regularization), and class_prior_alpha sets the Dirichlet prior on class probabilities.
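As a minimal sketch of how these might be set, assuming both are accepted as keyword arguments to the NimbusLDA constructor (check the signature in your installed version):

# Assumption: mu_scale and class_prior_alpha are constructor keyword arguments;
# exact names and defaults may differ in your nimbus_bci version.
clf = NimbusLDA(
    mu_scale=10.0,          # stronger prior precision on class means -> more regularization
    class_prior_alpha=1.0,  # symmetric Dirichlet prior over class probabilities
)
clf.fit(X_train, y_train)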
5. Online Learning
Update models incrementally with new data:
from nimbus_bci import NimbusLDA

# Initial training
clf = NimbusLDA()
clf.fit(X_train, y_train)

# Later: update with new data (no retraining from scratch)
X_new = np.random.randn(10, n_features)
y_new = np.random.randint(0, 4, 10)
clf.partial_fit(X_new, y_new)
print("Model updated with new data")
partial_fit() enables adaptive BCI systems that learn from user feedback during operation.
6. Batch Inference with Diagnostics
Use batch inference for comprehensive diagnostics:
from nimbus_bci import predict_batch, NimbusLDA
from nimbus_bci.data import BCIData, BCIMetadata

# Train model
clf = NimbusLDA()
clf.fit(X_train, y_train)

# Prepare data for batch inference
metadata = BCIMetadata(
    sampling_rate=250.0,
    paradigm="motor_imagery",
    feature_type="csp",
    n_features=16,
    n_classes=4
)

# X_test should be shape (n_features, n_samples, n_trials) for BCIData
# For this example, reshape: (n_trials, n_features) -> (n_features, 1, n_trials)
X_test_reshaped = X_test.T[:, np.newaxis, :]
data = BCIData(X_test_reshaped, metadata)

# Run batch inference
result = predict_batch(clf.model_, data)

print(f"Predictions: {result.predictions}")
print(f"Mean entropy: {result.mean_entropy:.2f} bits")
print(f"Balance: {result.balance:.2%}")
print(f"Latency: {result.latency_ms:.1f}ms")
Diagnostics available: entropy (prediction uncertainty), balance (class distribution balance), and latency_ms (inference time per trial).
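For intuition about the entropy diagnostic, you can compute per-trial Shannon entropy directly from the posterior probabilities returned by predict_proba (a plain NumPy sketch, not part of the nimbus_bci API):

import numpy as np

# Shannon entropy (in bits) of each trial's posterior distribution
probs = clf.predict_proba(X_test)        # shape (n_trials, n_classes)
eps = 1e-12                              # avoid log(0)
entropy_bits = -np.sum(probs * np.log2(probs + eps), axis=1)
print(f"Per-trial entropy: {entropy_bits.round(2)}")
# 0 bits = fully confident; log2(4) = 2 bits = uniform over 4 classes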
7. Streaming Inference
Process data in real-time chunks:
from nimbus_bci import NimbusLDA, StreamingSession
from nimbus_bci.data import BCIMetadata

# Train model
clf = NimbusLDA()
clf.fit(X_train, y_train)

# Setup streaming session
metadata = BCIMetadata(
    sampling_rate=250.0,
    paradigm="motor_imagery",
    feature_type="csp",
    n_features=16,
    n_classes=4,
    chunk_size=125,  # 500ms chunks at 250 Hz
    temporal_aggregation="logvar"
)
session = StreamingSession(clf.model_, metadata)

# Simulate streaming data (in practice, from real-time EEG)
n_chunks = 4
for i in range(n_chunks):
    # Each chunk: (n_features, chunk_size)
    chunk = np.random.randn(16, 125)
    result = session.process_chunk(chunk)
    print(f"Chunk {i+1}: class {result.prediction} (conf: {result.confidence:.2%})")

# Finalize trial with aggregation
final = session.finalize_trial(method="weighted_vote")
print(f"\nFinal prediction: class {final.prediction}")
print(f"Entropy: {final.entropy:.2f} bits")
Streaming inference enables real-time BCI applications with sub-20ms latency per chunk.
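To check the per-chunk latency on your own hardware, you can time process_chunk directly (a simple timing sketch using the session set up above; actual numbers depend on your machine):

import time

# Time a single chunk through the streaming session created above
chunk = np.random.randn(16, 125)
start = time.perf_counter()
result = session.process_chunk(chunk)
elapsed_ms = (time.perf_counter() - start) * 1000.0
print(f"process_chunk latency: {elapsed_ms:.1f} ms")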
from nimbus_bci import NimbusLDA, estimate_normalization_params, apply_normalization

# Estimate normalization from training data
norm_params = estimate_normalization_params(X_train, method="zscore")

# Apply to all data
X_train_norm = apply_normalization(X_train, norm_params)
X_test_norm = apply_normalization(X_test, norm_params)

# Train on normalized data
clf = NimbusLDA()
clf.fit(X_train_norm, y_train)

# Predict on normalized test data
predictions = clf.predict(X_test_norm)
Always use the same normalization parameters for training and test data: estimate them from the training set, then apply them to the test set.
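Conceptually, z-score normalization with train-set statistics amounts to the following (a plain NumPy sketch of the principle; estimate_normalization_params and apply_normalization handle this for you):

import numpy as np

# Statistics come from the training set only...
mean = X_train.mean(axis=0)
std = X_train.std(axis=0) + 1e-12   # guard against zero variance

# ...and the same statistics are applied to both splits
X_train_z = (X_train - mean) / std
X_test_z = (X_test - mean) / std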
from nimbus_bci import NimbusLDA

# Initial training on calibration data
clf = NimbusLDA()
clf.fit(X_calibration, y_calibration)

# During use: adapt to the user
for trial in online_session:
    prediction = clf.predict(trial.features.reshape(1, -1))

    # Get feedback (e.g., from user or task)
    true_label = get_feedback()

    # Update model with the labeled trial
    clf.partial_fit(trial.features.reshape(1, -1), [true_label])
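Note that online_session and get_feedback() above are placeholders for your own acquisition and feedback pipeline. A self-contained simulation of the same adapt-as-you-go loop, using synthetic features and labels in place of a real session (the single-sample partial_fit usage follows the Online Learning step above):

import numpy as np
from nimbus_bci import NimbusLDA

rng = np.random.default_rng(0)
n_features = 16

# Synthetic calibration data stands in for a real calibration recording
X_calibration = rng.standard_normal((100, n_features))
y_calibration = rng.integers(0, 4, 100)

clf = NimbusLDA()
clf.fit(X_calibration, y_calibration)

# Simulated online phase: one trial at a time, with "feedback" labels
for _ in range(10):
    features = rng.standard_normal(n_features)
    prediction = clf.predict(features.reshape(1, -1))
    true_label = rng.integers(0, 4)   # placeholder for real user/task feedback
    clf.partial_fit(features.reshape(1, -1), [true_label])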