Understood, thank you.
I have one more question (if you have the time to help I would really appreciate it):
This is my first time working with Arduino and OpenMV. My goal is to turn a Python script I wrote in Spyder into code that can be deployed on an Arduino through OpenMV and MicroPython. The script trains a neural network on a dataset of numbers: for each number it returns 1 if the number is < 800 and 0 if it is >= 800, and whenever it returns 0 the red LED should blink.
My end goal is to have an MQ-135 gas sensor supply those values (live sensor measurements) instead of a pre-made dataset, so the code would classify the values returned by the sensor as 1 or 0. I tried pasting the Python code as-is into the OpenMV IDE (I now know this doesn't work), but I have no idea how to move forward. Is there a simple way to transform the Python code I have so that OpenMV can deploy it to the Arduino? Note: using a neural network is a must in my case.
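For reference, here is roughly the on-device loop I am imagining (just a sketch I pieced together from the MicroPython docs; the 'P6' ADC pin and applying the 800 threshold directly to the raw reading are my assumptions, and the neural network inference step is exactly the part I am missing):

import time
from pyb import ADC, LED

adc = ADC('P6')    # assuming the MQ-135 analog output is wired to pin P6
red_led = LED(1)   # LED(1) is the red LED on OpenMV boards

while True:
    reading = adc.read()  # raw 12-bit ADC value (0-4095), not CO2 ppm
    # TODO: scale the reading and run the quantized neural network here;
    # the hard-coded threshold below only stands in for the model's output
    label = 1 if reading < 800 else 0
    if label == 0:
        red_led.on()
        time.sleep_ms(200)
        red_led.off()
        time.sleep_ms(200)
    else:
        time.sleep_ms(500)  # idle between readings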
This is my code:
import os
import time
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.optimizers.schedules import ExponentialDecay
from tensorflow.keras.callbacks import EarlyStopping, LearningRateScheduler
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
# Set the random seed for reproducibility
tf.random.set_seed(42)
# Load the dataset (adjust the path for your machine)
df = pd.read_csv('C:\\Users\\Malak\\co2_2000_dataset.csv')
# Split the dataset into features (X) and target variable (y)
X = df[['CO2']].values # Features
y = df['Label'].values # Target variable
# Split the data into training and test sets (adjust test_size as needed)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Normalize feature values to a range between 0 and 1
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
np.save('X_CO2train.npy', X_train)
np.save('X_CO2test.npy', X_test)
np.save('y_CO2train.npy', y_train)
np.save('y_CO2test.npy', y_test)
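# Note: the network only ever sees *scaled* inputs, so the deployed code must
# reproduce this exact scaling. Printing the fitted range lets it be
# hard-coded on the device: scaled = (raw - data_min_) / (data_max_ - data_min_)
print("Scaler range:", scaler.data_min_, "to", scaler.data_max_)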
# Load the preprocessed training and test sets
X_train = np.load('X_CO2train.npy')
X_test = np.load('X_CO2test.npy')
y_train = np.load('y_CO2train.npy')
y_test = np.load('y_CO2test.npy')
test_dataset = tf.data.Dataset.from_tensor_slices((X_test, y_test))
# Create a sequential model with two hidden layers
initial_learning_rate = 0.01
lr_schedule = ExponentialDecay(
    initial_learning_rate, decay_steps=100, decay_rate=0.9, staircase=True
)
model1 = keras.Sequential([
    keras.layers.Dense(5, activation='relu', input_shape=[1]),
    keras.layers.Dense(3, activation='relu'),
    keras.layers.Dense(1, activation='sigmoid')
])
# Compile the model with the learning rate schedule
model1.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule),
               loss='binary_crossentropy',
               metrics=['accuracy'])
# Print model summary
model1.summary()
initial_weights = model1.layers[0].get_weights()
# Print the initial weights and biases
print("Initial Weights:", initial_weights[0])
print("Initial Biases:", initial_weights[1])
start_time = time.time()
# Train the model
history = model1.fit(X_train, y_train, epochs=100, batch_size=32, validation_split=0.1)
# Evaluate the model on the test set
test_loss, test_accuracy = model1.evaluate(X_test, y_test)
print(f'Test accuracy: {test_accuracy*100:.2f}%')
# Make predictions on the test set
y_pred = (model1.predict(X_test) > 0.5).astype(int)
training_time = time.time() - start_time
# Print the training time
print(f"Training time for neural network with three hidden layers: {training_time} seconds")
final_step = 100 # Adjust this to the final step of your training
# For model1
final_learning_rate_model1 = lr_schedule(final_step)
print(f"Final Learning Rate for model1: {final_learning_rate_model1}")
# Define a function to load the test set as a representative dataset
def representative_data_gen():
    X_test = np.load('X_CO2test.npy')
    for input_data in X_test:
        # Each sample must be float32 and match the model's input shape (1, 1)
        input_data = input_data.astype(np.float32).reshape(1, 1)
        yield [input_data]
# Extract the training and validation accuracy and loss from the history object
train_accuracy = history.history['accuracy']
val_accuracy = history.history['val_accuracy']
train_loss = history.history['loss']
val_loss = history.history['val_loss']
# Create subplots for accuracy and loss
plt.figure(figsize=(12, 4))
# Plot training & validation accuracy values
plt.subplot(1, 2, 1)
plt.plot(train_accuracy, label='Training Accuracy')
plt.plot(val_accuracy, label='Validation Accuracy')
plt.title('Model Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
# Plot training & validation loss values
plt.subplot(1, 2, 2)
plt.plot(train_loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title('Model Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
# Show the plots
plt.tight_layout()
plt.show()
# Save the test set with true and predicted labels to a CSV file
test_set_with_labels = pd.DataFrame({'CO2': X_test.flatten(), 'Label': y_test, 'Predicted_Label': y_pred.flatten()})
test_set_with_labels.to_csv('test_set_with_labels.csv', index=False)
# Define the converter with quantization
converter = tf.lite.TFLiteConverter.from_keras_model(model1)
# Set the optimization flag to enable quantization
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
# Force full-integer (int8) quantization for the weights and activations
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.inference_input_type = tf.int8
converter.inference_output_type = tf.int8
# Convert the model to TFLite format
tflite_quantized_model = converter.convert()
# Save the quantized TFLite model to a file
with open('quantized_model.tflite', 'wb') as f:
    f.write(tflite_quantized_model)
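# Optional sanity check: run one sample through the quantized model with the
# TFLite interpreter before deploying it. With int8 input/output the values
# must be (de)quantized using the scale/zero-point stored in the model.
interpreter = tf.lite.Interpreter(model_path='quantized_model.tflite')
interpreter.allocate_tensors()
inp = interpreter.get_input_details()[0]
out = interpreter.get_output_details()[0]
in_scale, in_zero = inp['quantization']
sample = np.float32(X_test[0])  # one already-scaled CO2 reading
q_sample = np.round(sample / in_scale + in_zero).astype(np.int8).reshape(1, 1)
interpreter.set_tensor(inp['index'], q_sample)
interpreter.invoke()
out_scale, out_zero = out['quantization']
q_out = interpreter.get_tensor(out['index'])
print("Quantized model probability:", (q_out.astype(np.float32) - out_zero) * out_scale)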
# Print the current working directory (so you can find the saved files)
current_directory = os.getcwd()
print("Current working directory:", current_directory)
# Load the TFLite model
with open('quantized_model.tflite', 'rb') as f:
    tflite_model = f.read()
# Convert the model bytes to a C array format
model_data = ', '.join(map(str, tflite_model))
# Write the C array to a header file
with open('model_data.h', 'w') as f:
    f.write('#ifndef MODEL_DATA_H\n')
    f.write('#define MODEL_DATA_H\n\n')
    f.write('const unsigned char model_data[] = {' + model_data + '};\n\n')
    f.write('#endif\n')
Thank you for your time.