# Linear-regression example using TensorFlow (part 2)
# List 3-30
# Fit y = 2x
%matplotlib inline
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Training data: 101 evenly spaced points in [-1, 1] on the line y = 2x,
# perturbed with Gaussian noise of standard deviation 0.33.
x_train = np.linspace(-1, 1, 101)
y_train = 2 * x_train + np.random.randn(*x_train.shape) * 0.33
plt.scatter(x_train, y_train)

# Trainable parameters (slope and intercept), randomly initialized.
# BUG FIX: tf.Variable's second positional parameter is `trainable`, not the
# dtype — the original passed tf.float32 there, so the dtype request was
# silently ignored. Pass it via the `dtype=` keyword instead.
w = tf.Variable(np.random.randn(), dtype=tf.float32)
b = tf.Variable(np.random.randn(), dtype=tf.float32)

# Hyperparameters: learning rate and number of full passes over the data.
learning_rate = 0.01
training_epochs = 100
loss = []            # per-sample loss history
count = 0            # total training steps taken so far
display_count = 10   # print the loss once every 10 training samples
# Define the model function
def model(x, w, b):
    """Linear model: return the prediction w*x + b for input x."""
    scaled = tf.multiply(x, w)
    return scaled + b
def loss_fun(x, y, w, b):
    """Mean-squared-error loss between model(x, w, b) and the target y."""
    residual = model(x, w, b) - y
    return tf.reduce_mean(tf.square(residual))
def grad(x, y, w, b):
    """Return the gradients [d(loss)/dw, d(loss)/db] at the current (w, b)."""
    with tf.GradientTape() as tape:
        current_loss = loss_fun(x, y, w, b)
    # The tape records the forward pass above; differentiate w.r.t. [w, b].
    return tape.gradient(current_loss, [w, b])
# Train with plain SGD, one sample at a time (stochastic gradient descent).
for epoch in range(training_epochs):
    for (x, y) in zip(x_train, y_train):
        loss_ = loss_fun(x, y, w, b)
        loss.append(loss_)
        # Gradient of the loss w.r.t. [w, b] at the current parameters.
        delta_w, delta_b = grad(x, y, w, b)
        # Step each parameter against its gradient.
        w.assign_sub(learning_rate * delta_w)
        b.assign_sub(learning_rate * delta_b)
        # One more training step taken.
        count = count + 1
        if count % display_count == 0:
            # BUG FIX: float(loss_) — formatting an EagerTensor directly with
            # '{:.9f}' raises TypeError (EagerTensor has no numeric __format__).
            print('train epoch : ', '%02d' % (epoch + 1),
                  'step:%03d' % (count),
                  'loss= ', '{:.9f}'.format(float(loss_)))

# Plot the noisy data together with the fitted line y = w*x + b.
plt.scatter(x_train, y_train)
y_learned = x_train * w.numpy() + b.numpy()
plt.plot(x_train, y_learned, 'r')
plt.show()