Implementing handwritten digit recognition with TensorFlow
import tensorflow as tf
import numpy as np
class MNISTLoader(object):"""数据加载处理类"""def __init__(self):""""""# 1、获取数据 (self.train_data, self.train_label), (self.test_data, self.test_label) = tf.keras.datasets.mnist.load_data()# 2、处理数据,归一化,维度以及类型# MNIST中的图像默认为uint8(0-255的数字)。以下代码将其归一化到0-1之间的浮点数,并在最后增加一维作为颜色通道# 默认下载是(60000, 28, 28),扩展到四维方便计算理解[60000, 28, 28, 1]self.train_data = np.expand_dims(self.train_data.astype(np.float32) / 255.0, axis=-1)# [10000, 28, 28, 1]self.test_data = np.expand_dims(self.test_data.astype(np.float32) / 255.0, axis=-1)self.train_label = self.train_label.astype(np.int32) # [60000]self.test_label = self.test_label.astype(np.int32) # [10000]# 获取数据的大小self.num_train_data, self.num_test_data = self.train_data.shape[0], self.test_data.shape[0]def get_batch(self, batch_size):"""随机获取获取批次数据:param batch_size: 批次大小:return:"""# 从数据集中随机取出batch_size个元素并返回index = np.random.randint(0, np.shape(self.train_data)[0], batch_size)return self.train_data[index, :], self.train_label[index]class MLP(tf.keras.Model):"""自定义MLP类"""def __init__(self):super().__init__()# 定义两层神经网络,第一层100个神经元,激活函数relu,第二层10个神经元输出给softmaxself.flatten = tf.keras.layers.Flatten()self.dense1 = tf.keras.layers.Dense(units=100, activation=tf.nn.relu)self.dense2 = tf.keras.layers.Dense(units=10)def call(self, inputs):# [batch_size, 28, 28, 1]x = self.flatten(inputs)# [batch_size, 784]x = self.dense1(x)# [batch_size, 100]x = self.dense2(x)# [batch_size, 10]output = tf.nn.softmax(x)return outputnum_epochs = 5
batch_size = 50
learning_rate = 0.001

# Instantiate the model and the data loader, plus an optimizer (Adam here)
model = MLP()
data_loader = MNISTLoader()
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
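
Before training, a quick shape check can confirm everything is wired up. This is a minimal sketch using the objects instantiated above; the printed shapes follow from the comments in MNISTLoader and MLP:

X, y = data_loader.get_batch(batch_size)
print(X.shape)         # (50, 28, 28, 1): a batch of normalized images
print(y.shape)         # (50,): integer class labels
print(model(X).shape)  # (50, 10): one probability per digit class
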
# Compute the total number of batches to iterate over during training
num_batches = int(data_loader.num_train_data // batch_size * num_epochs)
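
With MNIST's 60,000 training images, a batch size of 50, and 5 epochs, this works out to 60000 // 50 * 5 = 6000 training iterations.
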
# Fetch a random batch and run one optimization step per iteration
for batch_index in range(num_batches):
    X, y = data_loader.get_batch(batch_size)
    with tf.GradientTape() as tape:
        y_pred = model(X)
        # Compute the per-example loss with tf.keras.losses
        loss = tf.keras.losses.sparse_categorical_crossentropy(y_true=y, y_pred=y_pred)
        # Average the loss over the batch
        loss = tf.reduce_mean(loss)
        print("batch %d: loss %f" % (batch_index, loss.numpy()))
    grads = tape.gradient(loss, model.variables)
    optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))
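
One design note: because MLP.call applies tf.nn.softmax, the loss above receives probabilities. A common and often more numerically stable alternative is to have the model return raw logits and pass from_logits=True to the loss. The sketch below shows that variant; it is not the tutorial's code, and MLPLogits is a hypothetical name:

class MLPLogits(tf.keras.Model):
    """Same architecture as MLP, but call returns raw logits (no softmax)"""
    def __init__(self):
        super().__init__()
        self.flatten = tf.keras.layers.Flatten()
        self.dense1 = tf.keras.layers.Dense(units=100, activation=tf.nn.relu)
        self.dense2 = tf.keras.layers.Dense(units=10)

    def call(self, inputs):
        x = self.flatten(inputs)
        x = self.dense1(x)
        return self.dense2(x)  # raw logits

# Inside the training loop, the loss call then becomes:
# loss = tf.keras.losses.sparse_categorical_crossentropy(y_true=y, y_pred=y_pred, from_logits=True)

The evaluation below is unaffected by this change: SparseCategoricalAccuracy takes an argmax over the predictions, and the argmax of the logits equals the argmax of their softmax.
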
# Use the trained model to predict on the full test set
y_pred = model.predict(data_loader.test_data)
# Define the evaluation metric
sparse_categorical_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
# Feed the test-set predictions and true labels into the metric in one call
sparse_categorical_accuracy.update_state(y_true=data_loader.test_label, y_pred=y_pred)
print("测试准确率: %f" % sparse_categorical_accuracy.result())