Getting Started with Delta for Deep Learning
Adding Delta to Your Project
To add the Delta library to your Rust project, you need to include it in your Cargo.toml file. Follow these steps:
- Open your project’s Cargo.toml file.
- Add the following lines under [dependencies]:

[dependencies]
deltaml = "0.1.0"
Delta is currently published as the deltaml crate, but note that it is still experimental and in an alpha stage, so things might break in upcoming iterations.
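If you prefer the command line, cargo's built-in add subcommand can insert the dependency for you; note that this pulls in the latest published version of deltaml rather than pinning 0.1.0 explicitly:

cargo add deltaml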
Delta Usage Example
1. Create the main Function
We start with an empty asynchronous main function using #[tokio::main].
#[tokio::main]
async fn main() {
    println!("Starting the Delta example...");
}
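Since the example uses #[tokio::main], the project also needs the tokio crate as a dependency alongside deltaml. A minimal sketch of the relevant Cargo.toml entries, using the convenient "full" feature set (the narrower macros and rt-multi-thread features would also be enough for this example):

[dependencies]
deltaml = "0.1.0"
tokio = { version = "1", features = ["full"] }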
2. Define a Neural Network
Next, we create a neural network using Delta’s Sequential model.
let mut model = Sequential::new()
    .add(Flatten::new(Shape::from(IxDyn(&[32, 32, 3])))) // CIFAR-10: 32x32x3 -> 3072
    .add(Dense::new(128, Some(ReluActivation::new()), true)) // Input: 3072, Output: 128
    .add(Dense::new(10, Some(SoftmaxActivation::new()), false)); // Output: 10 classes
model.summary();
3. Compile the Model
Before training, we need to compile the model by defining the optimizer and loss function.
let optimizer = Adam::new(0.001);
model.compile(optimizer, MeanSquaredLoss::new());
4. Load the Dataset
Now, we load the CIFAR-10 dataset for training, validation, and testing.
let mut train_data = Cifar10Dataset::load_train().await;
let val_data = Cifar10Dataset::load_val().await;
let test_data = Cifar10Dataset::load_test().await;
println!("Train dataset size: {}", train_data.len());
5. Train the Model
We train the model using the loaded training data.
let epoch = 10;
let batch_size = 32;
match model.fit(&mut train_data, epoch, batch_size) {
    Ok(_) => println!("Model trained successfully"),
    Err(e) => println!("Failed to train model: {}", e),
}
6. Validate the Model
After training, we validate the model using the validation dataset.
match model.validate(&val_data, batch_size) {
    Ok(validation_loss) => println!("Validation Loss: {:.6}", validation_loss),
    Err(e) => println!("Failed to validate model: {}", e),
}
7. Evaluate the Model
Finally, we evaluate the model on the test dataset.
let accuracy = model.evaluate(&test_data, batch_size).expect("Failed to evaluate the model");
println!("Test Accuracy: {:.2}%", accuracy * 100.0);
8. Save the Model
Once satisfied with the model, we save it to a file for later use.
model.save("model_path").unwrap();
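Calling unwrap will panic if saving fails. A minimal alternative sketch that reports the error instead, assuming save returns a Result with a displayable error type, like fit and validate above:

if let Err(e) = model.save("model_path") {
    println!("Failed to save model: {}", e);
}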
9. Full Example
#[tokio::main]
async fn main() {
    // Create a neural network
    let mut model = Sequential::new()
        .add(Flatten::new(Shape::from(IxDyn(&[32, 32, 3])))) // CIFAR-10: 32x32x3 -> 3072
        .add(Dense::new(128, Some(ReluActivation::new()), true)) // Input: 3072, Output: 128
        .add(Dense::new(10, Some(SoftmaxActivation::new()), false)); // Output: 10 classes

    // Display the model summary
    model.summary();

    // Define an optimizer
    let optimizer = Adam::new(0.001);

    // Compile the model
    model.compile(optimizer, MeanSquaredLoss::new());

    // Load the train, validation, and test datasets
    let mut train_data = Cifar10Dataset::load_train().await;
    let test_data = Cifar10Dataset::load_test().await;
    let val_data = Cifar10Dataset::load_val().await;

    println!("Training the model...");
    println!("Train dataset size: {}", train_data.len());

    let epoch = 10;
    let batch_size = 32;

    match model.fit(&mut train_data, epoch, batch_size) {
        Ok(_) => println!("Model trained successfully"),
        Err(e) => println!("Failed to train model: {}", e),
    }

    // Validate the model
    match model.validate(&val_data, batch_size) {
        Ok(validation_loss) => println!("Validation Loss: {:.6}", validation_loss),
        Err(e) => println!("Failed to validate model: {}", e),
    }

    // Evaluate the model
    let accuracy = model.evaluate(&test_data, batch_size).expect("Failed to evaluate the model");
    println!("Test Accuracy: {:.2}%", accuracy * 100.0);

    // Save the model
    model.save("model_path").unwrap();
}
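Note that this snippet omits the use statements that bring Sequential, Dense, Flatten, the activations, Adam, MeanSquaredLoss, and Cifar10Dataset into scope; the exact paths depend on the deltaml crate's module layout, so check the crate documentation. With those imports in place and the dependencies from the setup section added, you can build and run the example from the project root with:

cargo run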