Hi Kwagyeman
I have tried three modified versions of the model, listed below, but I am still getting "ValueError: Failed to allocate tensors" when loading it on the device.
Could you please take a look and advise on how to improve this?
# Input image geometry fed to every model below (grayscale 48x48).
IMG_HEIGHT = 48
IMG_WIDTH = 48
CHANNELS = 1  # single-channel (grayscale) input
def simple_cnn_for_openmv():
    """Build a small 3-stage CNN for IMG_HEIGHT x IMG_WIDTH x CHANNELS input.

    Three Conv-BN-ReLU-MaxPool stages (16/32/64 filters), then global
    average pooling, dropout, and a NUM_CLASSES softmax head.

    Returns:
        A compiled-ready keras ``Model`` (not yet compiled).
    """
    # Use the CHANNELS constant instead of a hard-coded 1 so the input
    # shape stays consistent with the module-level configuration.
    input_tensor = layers.Input(shape=(IMG_HEIGHT, IMG_WIDTH, CHANNELS))

    x = input_tensor
    # Conv layers use use_bias=False because the following BatchNormalization
    # layer supplies its own learned offset, making the conv bias redundant.
    for filters in (16, 32, 64):
        x = layers.Conv2D(filters, (3, 3), padding='same', use_bias=False)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        x = layers.MaxPooling2D(2, 2)(x)

    # GAP keeps the head tiny (no large flattened Dense input), which matters
    # for microcontroller deployment where tensor-arena RAM is the limit.
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dropout(0.2)(x)
    output_tensor = layers.Dense(NUM_CLASSES, activation='softmax')(x)

    return models.Model(inputs=input_tensor, outputs=output_tensor)
def ultra_lite_cnn():
    """Build a lighter variant of the 3-stage CNN (8/16/32 filters).

    Same topology as ``simple_cnn_for_openmv`` with half the filters per
    stage, roughly quartering parameter count and activation memory.

    Returns:
        A keras ``Model`` (not yet compiled).
    """
    # Use CHANNELS rather than a hard-coded 1 for consistency with the
    # module-level configuration constants.
    input_tensor = layers.Input(shape=(IMG_HEIGHT, IMG_WIDTH, CHANNELS))

    x = input_tensor
    # use_bias=False: BatchNormalization provides the bias term.
    for filters in (8, 16, 32):
        x = layers.Conv2D(filters, (3, 3), padding='same', use_bias=False)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
        x = layers.MaxPooling2D(2, 2)(x)

    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dropout(0.2)(x)
    output_tensor = layers.Dense(NUM_CLASSES, activation='softmax')(x)

    return models.Model(inputs=input_tensor, outputs=output_tensor)
def nano_strided_model():
    """Build the smallest variant: strided convs instead of max-pooling.

    Each Conv2D uses stride 2 to downsample in-place (8/16/32 filters),
    removing the separate pooling layers and their intermediate activation
    buffers — the cheapest of the three models in RAM and compute.

    Returns:
        A keras ``Model`` (not yet compiled).
    """
    # Use CHANNELS rather than a hard-coded 1 for consistency with the
    # module-level configuration constants.
    input_tensor = layers.Input(shape=(IMG_HEIGHT, IMG_WIDTH, CHANNELS))

    x = input_tensor
    # Stride-2 convolutions downsample directly; use_bias=False because
    # BatchNormalization supplies the offset.
    for filters in (8, 16, 32):
        x = layers.Conv2D(filters, (3, 3), strides=(2, 2), padding='same',
                          use_bias=False)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)

    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dropout(0.2)(x)
    output_tensor = layers.Dense(NUM_CLASSES, activation='softmax')(x)

    return models.Model(inputs=input_tensor, outputs=output_tensor)