# CIFAR-10 image classification with a convolutional neural network (Keras in R)
library(keras)

# Load the CIFAR-10 dataset (50,000 training / 10,000 test colour images)
cifar <- dataset_cifar10()

# Training data: scale pixel values from [0, 255] to [0, 1] and
# one-hot encode the class labels
train_x <- cifar$train$x / 255
train_y <- to_categorical(cifar$train$y, num_classes = 10)

# Test data: same preprocessing
test_x <- cifar$test$x / 255
test_y <- to_categorical(cifar$test$y, num_classes = 10)

# Inspect the array dimensions
dim(train_x)
## [1] 50000 32 32 3
dim(test_x)
## [1] 10000 32 32 3
dim(train_y)
## [1] 50000 10
dim(test_y)
## [1] 10000 10

# Report the number of samples in each split
cat("No of training samples\t--", dim(train_x)[[1]],
    "\tNo of test samples\t--", dim(test_x)[[1]])
## No of training samples -- 50000 No of test samples -- 10000
model <- keras_model_sequential()
# Build a sequential model: two convolution blocks
# (conv + ReLU + 25% dropout + max-pooling each)
model %>%
  # First conv block: 32 3x3 filters; "same" padding preserves the
  # 32x32 spatial size. Note: the argument is `filters` — the original
  # `filter=` only worked via R's partial argument matching.
  layer_conv_2d(filters = 32, kernel_size = c(3, 3), padding = "same",
                input_shape = c(32, 32, 3)) %>%
  layer_activation("relu") %>%
  layer_dropout(0.25) %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  # Second conv block: 64 3x3 filters
  layer_conv_2d(filters = 64, kernel_size = c(3, 3), padding = "same") %>%
  layer_activation("relu") %>%
  layer_dropout(0.25) %>%
  layer_max_pooling_2d(pool_size = c(2, 2)) %>%
  # Classifier head (MLP): flatten + hidden layer (1024 units)
  # + output layer (10 units, softmax over the 10 classes)
  layer_flatten() %>%
  layer_dropout(0.25) %>%
  layer_dense(1024) %>%
  layer_activation("relu") %>%
  layer_dropout(0.25) %>%
  layer_dense(10) %>%
  layer_activation("softmax")
summary(model)
## ___________________________________________________________________________
## Layer (type) Output Shape Param #
## ===========================================================================
## conv2d_1 (Conv2D) (None, 32, 32, 32) 896
## ___________________________________________________________________________
## activation_1 (Activation) (None, 32, 32, 32) 0
## ___________________________________________________________________________
## dropout_1 (Dropout) (None, 32, 32, 32) 0
## ___________________________________________________________________________
## max_pooling2d_1 (MaxPooling2D) (None, 16, 16, 32) 0
## ___________________________________________________________________________
## conv2d_2 (Conv2D) (None, 16, 16, 64) 18496
## ___________________________________________________________________________
## activation_2 (Activation) (None, 16, 16, 64) 0
## ___________________________________________________________________________
## dropout_2 (Dropout) (None, 16, 16, 64) 0
## ___________________________________________________________________________
## max_pooling2d_2 (MaxPooling2D) (None, 8, 8, 64) 0
## ___________________________________________________________________________
## flatten_1 (Flatten) (None, 4096) 0
## ___________________________________________________________________________
## dropout_3 (Dropout) (None, 4096) 0
## ___________________________________________________________________________
## dense_1 (Dense) (None, 1024) 4195328
## ___________________________________________________________________________
## activation_3 (Activation) (None, 1024) 0
## ___________________________________________________________________________
## dropout_4 (Dropout) (None, 1024) 0
## ___________________________________________________________________________
## dense_2 (Dense) (None, 10) 10250
## ___________________________________________________________________________
## activation_4 (Activation) (None, 10) 0
## ===========================================================================
## Total params: 4,224,970
## Trainable params: 4,224,970
## Non-trainable params: 0
## ___________________________________________________________________________
# Compile: categorical cross-entropy loss (multi-class, one-hot labels),
# Adam optimizer, track accuracy during training
model %>% compile(
  loss = "categorical_crossentropy",
  optimizer = "adam",
  metrics = "accuracy"
)
# Fit the model: 10 epochs, batch size 128, with 20% of the training
# data held out for validation
history <- model %>% fit(
  train_x, train_y,
  epochs = 10, batch_size = 128,
  validation_split = 0.2, verbose = 2
)

# NOTE(review): the following sentence was bare (unquoted) prose in the
# original source and would cause a parse error; kept as a comment,
# translated from Chinese:
# After ten epochs of training, the model reaches ~0.84 accuracy on the
# training data and ~0.74 accuracy on the test data.

# Plot the training/validation loss and accuracy curves
plot(history)

# Evaluate final loss and accuracy on the held-out test set
model %>% evaluate(test_x, test_y)
## $loss
## [1] 0.7644399
##
## $acc
## [1] 0.7384