Python Neural Network Study Notes, Part 2
A first look at neural networks
Tensor operations
Why NumPy is fast: NumPy ships with optimized built-in functions, and when these functions run, the computation is handed off to a Basic Linear Algebra Subprograms (BLAS) implementation. BLAS is a set of low-level, highly efficient routines, typically written in C or Fortran.
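As a rough illustration of that hand-off, the sketch below times np.dot against an equivalent pure-Python loop; the exact speedup depends on the machine and on which BLAS library NumPy was built against.

import time
import numpy as np

n = 1_000_000
v1 = np.random.rand(n)
v2 = np.random.rand(n)

# Pure-Python loop: every multiply-add goes through the interpreter.
t0 = time.time()
s = 0.0
for i in range(n):
    s += v1[i] * v2[i]
t_loop = time.time() - t0

# np.dot hands the same computation to a BLAS routine.
t0 = time.time()
s_np = np.dot(v1, v2)
t_blas = time.time() - t0

print(f'loop: {t_loop:.4f}s, np.dot: {t_blas:.4f}s')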
Broadcasting
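The notes only name broadcasting; as a minimal sketch, it is the rule by which NumPy combines arrays of different shapes, conceptually repeating the smaller array along the missing axes without copying its data.

import numpy as np

x = np.random.rand(32, 10)   # shape (32, 10)
y = np.random.rand(10)       # shape (10,)

# y is broadcast to shape (32, 10) so the element-wise add lines up.
z = x + y
print(z.shape)  # (32, 10)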
A brief introduction to TensorFlow
TensorFlow is a Python-based framework, developed primarily by Google.
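The TensorFlow feature the classifier below leans on is automatic differentiation with tf.GradientTape; a minimal sketch:

import tensorflow as tf

x = tf.Variable(3.0)
with tf.GradientTape() as tape:
    y = x ** 2              # y = x^2
grad = tape.gradient(y, x)  # dy/dx = 2x = 6.0
print(grad)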
A brief introduction to Keras
Keras is a high-level deep-learning API that runs on top of TensorFlow and is bundled with it as tf.keras.
A linear classifier
import numpy as np
from matplotlib import pyplot
import tensorflow as tf

sample_num = 1000

# Two Gaussian point clouds: the negative class centered at (0, 3),
# the positive class at (3, 0), with the same covariance.
ne_samples = np.random.multivariate_normal(mean=[0, 3], cov=[[1, .5], [.5, 1]], size=sample_num)
po_samples = np.random.multivariate_normal(mean=[3, 0], cov=[[1, .5], [.5, 1]], size=sample_num)

# Stack into one (2000, 2) input array with matching 0/1 targets.
inputs = np.vstack((ne_samples, po_samples)).astype(np.float32)
targets = np.vstack((np.zeros((sample_num, 1), dtype='float32'),
                     np.ones((sample_num, 1), dtype='float32')))

pyplot.scatter(inputs[:, 0], inputs[:, 1], c=targets[:, 0])
# pyplot.show()

input_dim = 2
output_dim = 1

# Trainable parameters: a (2, 1) weight matrix and a (1,) bias.
w = tf.Variable(initial_value=tf.random.uniform(shape=(input_dim, output_dim)))
b = tf.Variable(initial_value=tf.zeros(shape=(output_dim,)))

def model(x):
    # Affine prediction: x @ w + b
    return tf.matmul(x, w) + b

def square_loss(outputs, predictions):
    # Mean squared error over the batch.
    sample_loss = tf.square(outputs - predictions)
    return tf.reduce_mean(sample_loss)

learning_rate = .1

def train(x, outputs):
    # One step of full-batch gradient descent.
    with tf.GradientTape() as tape:
        predictions = model(x)
        loss = square_loss(outputs, predictions)
    grad_loss_w, grad_loss_b = tape.gradient(loss, [w, b])
    w.assign_sub(grad_loss_w * learning_rate)
    b.assign_sub(grad_loss_b * learning_rate)
    return loss

for epoch in range(20):
    loss = train(inputs, targets)
    print(f'loss at step {epoch}: {loss:.4f}')

x = np.linspace(-1, 4, 100)
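# Decision boundary: points where the model outputs exactly 0.5,
# i.e. w1*x + w2*y + b = 0.5, solved for y below.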
y = - (w[0] / w[1] * x) + (0.5 - b) / w[1]
pyplot.plot(x, y, '-r')
predictions = model(inputs)
pyplot.scatter(inputs[:, 0], inputs[:, 1], c=predictions[:, 0] > 0.5)
pyplot.show()
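To tie this back to the Keras section above: the same linear classifier can be written in a few lines with tf.keras. A minimal sketch, reusing the inputs and targets arrays defined above and substituting the built-in SGD optimizer and mean-squared-error loss for the hand-written training loop:

import tensorflow as tf

# A single Dense unit computes tf.matmul(x, w) + b, just like model() above.
keras_model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
keras_model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1),
                    loss='mean_squared_error')
keras_model.fit(inputs, targets, epochs=20, batch_size=len(inputs))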