Hello!
I've saved a (400, 400, 3) numpy array to a .bmp file, and the format is RGB565. But when I tried to load the .bmp file from flash memory and create an Image object, I found the byte length of the Image object quite weird (320,000 instead of 480,000 for an RGB565 image).
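For reference, here are the byte counts I'd expect under each interpretation (a quick sanity check, assuming 2 bytes/pixel for RGB565 and 3 bytes/pixel for RGB888):

w, h = 400, 400
print('RGB565:', w * h * 2)  # 320000 bytes at 2 bytes/pixel
print('RGB888:', w * h * 3)  # 480000 bytes at 3 bytes/pixel

Here is the loading and preprocessing code: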
img = image.Image('sample_4.bmp', buffer=bytes, copy_to_fb=True)
print('img size: ', img.size())

# Preprocess
h = img.height()
w = img.width()
print('H x W: ', h, w)
x_scale = self.crop_size / h
y_scale = self.crop_size / w
img.scale(x_scale=x_scale,
          y_scale=y_scale,
          roi=None,
          rgb_channel=-1,
          alpha=256,
          color_palette=None,
          alpha_palette=None,
          hint=0,  # nearest interpolation
          copy=False,
          copy_to_fb=False)

# Image to array
img_array = np.frombuffer(img, dtype=np.int8)
print(img_array.shape)
And the printed output shows:
img size: 320000
H x W: 400 400
(20000,)
Why is img.size() 320,000 instead of 400×400×3 = 480,000? And why, after scaling, does the byte length become 20,000 instead of 30,000 (which looks like one of the RGB channels is missing)?
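In case it helps, this is how I tried to interpret the raw buffer as RGB565 on my desktop (standard numpy, not ulab; 'dump.bin' is just a hypothetical raw dump of the pixel buffer from the device, and the big-endian byte order is my assumption about how the 16-bit pixels are packed):

import numpy as np

buf = open('dump.bin', 'rb').read()   # raw dump of the pixel buffer (hypothetical file)
px = np.frombuffer(buf, dtype='>u2')  # 16-bit pixels; byte order is an assumption
r = (px >> 11) & 0x1F                 # 5 bits of red
g = (px >> 5) & 0x3F                  # 6 bits of green
b = px & 0x1F                         # 5 bits of blue
print(px.size)                        # width*height if there really are 2 bytes/pixel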
PS: The following Python script is how I generate my .bmp files:
import struct
import cv2
import numpy as np
import torch

def save_bmp(filename, img):
    height, width, channels = img.shape  # img comes in as (height, width, channels)
    row_size = (width * channels + 3) // 4 * 4  # BMP rows are padded to a multiple of 4 bytes
    pixel_data = img.tobytes()  # rows written unpadded; fine here since 400*3 is a multiple of 4
    bmp_header = struct.pack('<2sIHHIIIIHHIIIIII',
                             b'BM',                   # BMP signature
                             54 + row_size * height,  # total file size
                             0, 0,                    # reserved
                             54,                      # offset to pixel data
                             40,                      # BITMAPINFOHEADER size
                             width, height,
                             1,                       # color planes
                             channels * 8,            # bits per pixel
                             0,                       # compression (BI_RGB)
                             row_size * height,       # image data size
                             0, 0,                    # x/y pixels per meter
                             0, 0)                    # palette colors used/important
    with open(filename, 'wb') as f:
        f.write(bmp_header)
        f.write(pixel_data)
for i in range(num_samples):
    img = torch.tensor(extracted_images[i], dtype=torch.float32)
    img = fivecrop_scale(img, crop_size=450)
    img = img.permute(1, 2, 0)  # CHW -> HWC
    img_array = img.numpy()
    print('Permuted Image Shape:', img.shape)
    img_bmp = cv2.flip(img_array, 0)  # BMP stores rows bottom-up
    img_bmp = (img_bmp * 255).astype(np.uint8)
    print("Image shape:", img_bmp.shape)  # (height, width, 3)
    save_bmp(f'/data/sample_{i+1}.bmp', img_bmp)
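To double-check what the file actually declares, I also read the header back like this (plain Python; the field layout mirrors the 54-byte header written in save_bmp above):

import struct

def read_bmp_header(filename):
    with open(filename, 'rb') as f:
        header = f.read(54)
    (sig, file_size, _, _, offset,
     hdr_size, width, height, planes, bpp) = struct.unpack('<2sIHHIIIIHH', header[:30])
    print('signature:', sig)
    print('width x height:', width, height)
    print('bits per pixel:', bpp)  # save_bmp writes channels * 8, i.e. 24 for 3 channels

read_bmp_header('/data/sample_1.bmp')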