import tensorflow.keras as keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Load the fashion-mnist pre-shuffled train data and test data
(x_train, y_train), (x_test, y_test) = keras.datasets.fashion_mnist.load_data()
print("x_train shape:", x_train.shape, "y_train shape:", y_train.shape)
# Print training set shape - note there are 60,000 training data of image size of 28x28, 60,000 train labels)
print("x_train shape:", x_train.shape, "y_train shape:", y_train.shape)
# Print the number of training and test datasets
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Define the text labels
fashion_mnist_labels = ["T-shirt/top", # index 0
"Trouser", # index 1
"Pullover", # index 2
"Dress", # index 3
"Coat", # index 4
"Sandal", # index 5
"Shirt", # index 6
"Sneaker", # index 7
"Bag", # index 8
"Ankle boot"] # index 9
# Image index: you can pick any number between 0 and 59,999
img_index = np.random.randint(0, x_train.shape[0])
# y_train contains the labels, ranging from 0 to 9
label_index = y_train[img_index]
# Print the label, for example "2 Pullover"
print("y = " + str(label_index) + " " + fashion_mnist_labels[label_index])
# Show one of the images from the training dataset
plt.imshow(x_train[img_index], cmap='gray')
plt.title(fashion_mnist_labels[label_index])
plt.show()
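To get a broader feel for the data, a small grid of random samples can be shown as well (a minimal sketch):

# Plot a 5x5 grid of random training images with their text labels
fig, axes = plt.subplots(5, 5, figsize=(10, 10))
for ax in axes.flat:
    i = np.random.randint(0, x_train.shape[0])
    ax.imshow(x_train[i], cmap='gray')
    ax.set_title(fashion_mnist_labels[y_train[i]], fontsize=8)
    ax.axis('off')
plt.show()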
from tensorflow.keras.layers import Input, Flatten, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.losses import sparse_categorical_crossentropy
from tensorflow.keras.optimizers import Adam
keras.backend.clear_session()
# x_train = x_train.reshape(-1, 28 * 28 * 1).astype("float32") / 255.0
# x_test = x_test.reshape(-1, 28 * 28 * 1).astype("float32") / 255.0
# y_train = tf.one_hot(y_train, 10)
# y_test = tf.one_hot(y_test , 10)
# print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)
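The commented-out block above shows an alternative preprocessing path (manual reshape plus one-hot labels). The model below uses a Flatten layer and the sparse_categorical_crossentropy loss, so neither step is needed, but rescaling the pixels from [0, 255] to [0, 1] is still generally worth doing; a minimal sketch:

# Scale pixel values to [0, 1]; the Flatten layer handles the reshape,
# and sparse_categorical_crossentropy accepts integer labels directly
x_train = x_train.astype("float32") / 255.0
x_test = x_test.astype("float32") / 255.0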
il = Input(shape=(28,28), name="input_layer")
fl = Flatten(name='Flatten_layer')(il)
hl = Dense(512, activation="relu", name="H1")(fl)
hl = Dense(512, activation="relu", name="H2")(hl)
hl = Dense(512, activation="relu", name="H3")(hl)
hl = Dense(512, activation="relu", name="H4")(hl)
hl = Dense(512, activation="relu", name="H5")(hl)
hl = Dense(512, activation="relu", name="H6")(hl)
ol = Dense(10, activation="softmax", name="output_layer")(hl)
model = Model(inputs=il, outputs=ol)
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam(learning_rate=0.001), metrics=['accuracy'])
model.summary()
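As a sanity check on the summary output, the parameter count of a Dense layer is (inputs × units) + units; a quick calculation (just a sketch, not needed for training) for the layers above:

# H1 connects the 784 flattened pixels to 512 units: weights plus biases
params_h1 = 28 * 28 * 512 + 512        # 401,920
# Each of H2-H6 connects 512 units to 512 units
params_hidden = 512 * 512 + 512        # 262,656
# Output layer: 512 units to 10 classes
params_out = 512 * 10 + 10             # 5,130
print(params_h1 + 5 * params_hidden + params_out)   # 1,720,330 total parameters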
model.fit(x_train, y_train, epochs=5, batch_size=128, verbose=1)
score = model.evaluate(x_test, y_test)
# Print test accuracy
print('\n', 'Test accuracy:', score[1])
y_hat = model.predict(x_test)
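model.predict returns an array of shape (10000, 10) holding class probabilities for each test image; np.argmax along axis 1 recovers the predicted class index, which can be compared against y_test (still integer labels, since the one-hot lines above were left commented out). A minimal sketch:

# Convert the probability vectors to predicted class indices and inspect a few test images
y_pred = np.argmax(y_hat, axis=1)
for i in np.random.choice(len(x_test), 5, replace=False):
    print("predicted:", fashion_mnist_labels[y_pred[i]], "| actual:", fashion_mnist_labels[y_test[i]])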