I'm writing a term project in Python for recognizing car logos; I'm building the dataset myself: 30 images for 5 car makes.
All the images are uploaded to data_img,
and the training labels are written to data_tag_r.
Here is the code from a Google Colaboratory (Jupyter) notebook:
import os
myDir = "/content/data_img/"
import numpy as np
# Read the integer class labels (one per image) from data_tag_r
f = open('data_tag_r', 'r')
data_tag = np.loadtxt(f)
f.close()
data_tag
from keras.utils import to_categorical
# One-hot encode the labels; the number of columns equals the number of classes
data_labels = to_categorical(data_tag)
data_labels
data_labels.shape
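Since there are 5 makes, data_labels.shape[1] should come out as 5 (one column per class); that is the number the size of the network's output layer has to match later on.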
from keras.preprocessing.image import img_to_array
from keras.applications import imagenet_utils
import numpy as np
import cv2
import io
# Quick preprocessing test on a single image (the training loop below does the same thing at 512x512)
image = cv2.imread("/content/data_img/a0000.jpg")  # default flag loads a colour (BGR) image; flag 0 would load grayscale and break the conversion below
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
print(image.shape, image.size, image.dtype)
width = 64
height = 64
dim = (width, height)
image = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
print(image.shape, image.size, image.dtype)
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
image = imagenet_utils.preprocess_input(image)
print(image.shape, image.size, image.dtype)
# Load and preprocess every image in the dataset
data_images = []
fileList = sorted(os.listdir("/content/data_img/"))  # sort so the file order matches the label order in data_tag_r (assuming the labels follow sorted file names)
width = 512
height = 512
dim = (width, height)
for i, filename in enumerate(fileList):
    img_file = '/content/data_img/' + filename
    image = cv2.imread(img_file)  # colour (BGR), same reason as above
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
    image = img_to_array(image)
    #image = np.expand_dims(image, axis=0)
    #image = imagenet_utils.preprocess_input(image)
    data_images.append(image)
data_images = np.array(data_images) / 255  # stack into one array and normalise pixels to [0, 1]
data_images.shape
data_images
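data_images.shape should come out as (number_of_images, 512, 512, 3). A quick sanity check (this assumes every file in data_img has exactly one label line in data_tag_r):
assert data_images.shape[0] == data_labels.shape[0], "each image needs exactly one label"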
x_train = data_images[:]
# x_test = data_images[10:]
y_train = data_labels[:]
# y_test = data_labels[10:]
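The commented-out split above just slices the arrays in order, so if the files are grouped by make the test slice can end up with only one class in it. A minimal sketch of a shuffled, stratified split instead (assuming scikit-learn, which Colab ships by default):
from sklearn.model_selection import train_test_split
# shuffle and split while keeping the proportion of each make equal in both parts
x_train, x_test, y_train, y_test = train_test_split(
    data_images, data_labels, test_size=0.2, stratify=data_tag, random_state=42)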
from keras import models
from keras import layers
network = models.Sequential()
network.add(layers.Conv2D(32, (5, 5), activation='relu', input_shape=(width,height,3)))
network.add(layers.MaxPooling2D((2, 2)))
network.add(layers.Conv2D(64, (3, 3), activation='relu'))
network.add(layers.MaxPooling2D((2, 2)))
network.add(layers.Conv2D(128, (3, 3), activation='relu'))
network.add(layers.MaxPooling2D((2, 2)))
network.add(layers.Conv2D(256, (3, 3), activation='relu'))
network.add(layers.MaxPooling2D((2, 2)))
network.add(layers.Flatten())
network.add(layers.Dense(256, activation='relu'))
network.add(layers.Dense(64, activation='relu'))
network.add(layers.Dense(15, activation='softmax'))
network.summary()
network.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'] )
### And this is where I get the errors ###
# network.fit(x_train, y_train, epochs=20, batch_size=8, validation_data=(x_test, y_test))
network.fit(x_train, y_train, epochs=3, batch_size=8)
### /And this is where I get the errors ###
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/tensor_shape.py:1134 assert_is_compatible_with
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes (None, 5) and (None, 15) are incompatible
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py in _method_wrapper(self, *args, **kwargs)
106 def _method_wrapper(self, *args, **kwargs):
107 if not self._in_multi_worker_mode(): # pylint: disable=protected-access
--> 108 return method(self, *args, **kwargs)
109
110 # Running inside `run_distribute_coordinator` already.
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1096 batch_size=batch_size):
1097 callbacks.on_train_batch_begin(step)
-> 1098 tmp_logs = train_function(iterator)
1099 if data_handler.should_sync:
1100 context.async_wait()
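Judging by the ValueError, the two incompatible shapes are the labels (data_labels has 5 columns, one per make) and the output of the last Dense layer (15 units), so the loss cannot compare them. A minimal sketch of the change, assuming the 5 makes are the only classes:
network.add(layers.Dense(data_labels.shape[1], activation='softmax'))  # 5 outputs, one per make
i.e. the size of the output layer has to equal the number of columns in the one-hot labels.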