
Regression Problems


Continuous problems call for linear regression, fitted here with gradient descent:

  Gradient Descent

  y = wx + b
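
Concretely, the script below minimizes the mean squared error over the $N$ training points, and `step_gradient` accumulates exactly the partial derivatives of that loss:

$$\text{loss} = \frac{1}{N}\sum_{i=1}^{N}\bigl(y_i - (w x_i + b)\bigr)^2$$

$$\frac{\partial\,\text{loss}}{\partial b} = \frac{2}{N}\sum_{i=1}^{N}\bigl((w x_i + b) - y_i\bigr),\qquad \frac{\partial\,\text{loss}}{\partial w} = \frac{2}{N}\sum_{i=1}^{N} x_i\bigl((w x_i + b) - y_i\bigr)$$

Each iteration then updates $b \leftarrow b - \text{lr}\cdot\partial\,\text{loss}/\partial b$ and $w \leftarrow w - \text{lr}\cdot\partial\,\text{loss}/\partial w$.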

import numpy as np

# mean squared error of the line y = wx + b over all points
def compute_error_for_line_given_points(b, w, points):  # points is a 2-D array of (x, y) rows
    totalError = 0
    for i in range(0, len(points)):
        x = points[i, 0]
        y = points[i, 1]
        totalError += (y - (w * x + b)) ** 2

    return totalError / float(len(points))


def step_gradient(b_current, w_current, points, learningRate):
    b_gradient = 0
    w_gradient = 0
    N = float(len(points))
    for i in range(0, len(points)):
        x = points[i, 0]
        y = points[i, 1]

        b_gradient += (2 / N) * ((w_current * x + b_current) - y)
        w_gradient += (2 / N) * x * ((w_current * x + b_current) - y)
    # update: take one step against the gradient
    new_b = b_current - (learningRate * b_gradient)
    new_w = w_current - (learningRate * w_gradient)
    return [new_b, new_w]


def gradient_descent_runner(points, starting_b, starting_w, learning_rate, num_iterations):
    b = starting_b
    w = starting_w
    for i in range(num_iterations):
        b, w = step_gradient(b, w, np.array(points), learning_rate)
    return [b, w]

def run():
    points = np.genfromtxt("d:/data.csv", delimiter=",")  # a [100, 2] CSV of (x, y) rows
    print(points)
    learning_rate = 0.0001
    initial_b = 0
    initial_w = 0
    num_iterations = 1000
    print(f"Starting gradient descent at w:{initial_w}, b:{initial_b}, error={compute_error_for_line_given_points(initial_b, initial_w, points)}")
    print("Running...")

    [b, w] = gradient_descent_runner(points, initial_b, initial_w, learning_rate, num_iterations)

    print(f"After {num_iterations} iterations: w:{w}, b:{b}, error={compute_error_for_line_given_points(b, w, points)}")


if __name__ == "__main__":
    run()


Here points is a [100, 2] two-dimensional array: 100 samples, each an (x, y) pair.
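
The script reads d:/data.csv from disk. If you don't have that file, a minimal sketch like the following produces a compatible [100, 2] file (the coefficients and noise scale are made-up illustration values, not from the original post):

import numpy as np

# Generate 100 synthetic (x, y) rows along y = 1.5x + 0.5 with Gaussian noise;
# w = 1.5, b = 0.5, and the noise level are arbitrary choices for illustration.
x = np.random.uniform(0, 100, 100)
y = 1.5 * x + 0.5 + np.random.normal(0, 10, 100)
np.savetxt("d:/data.csv", np.column_stack((x, y)), delimiter=",")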

Discrete problems are classification problems. Common examples include image recognition and handwritten digit recognition.
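
Instead of predicting a real number, the network predicts a score per class, so each scalar label is first expanded into a one-hot vector. A quick illustration of tf.one_hot, which the script below applies to the MNIST labels:

import tensorflow as tf

# The digit 3 becomes a length-10 indicator vector with a 1 at index 3.
print(tf.one_hot([3], depth=10))
# tf.Tensor([[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]], shape=(1, 10), dtype=float32)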

import tensorflow as tf

from tensorflow.keras import datasets, layers, optimizers
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"  # silence TensorFlow info logging

(x, y), (x_val, y_val) = datasets.mnist.load_data()  # x: [60000, 28, 28] images, y: [60000] labels
x = tf.convert_to_tensor(x, dtype=tf.float32) / 255.  # scale pixel values to [0, 1]
y = tf.convert_to_tensor(y, dtype=tf.int32)

y = tf.one_hot(y, depth=10)  # expand each label into a length-10 vector
print(x.shape, y.shape)
train_dataset = tf.data.Dataset.from_tensor_slices((x, y))
train_dataset = train_dataset.batch(200)  # 60000 / 200 = 300 batches per epoch


# a simple fully connected network: 784 -> 512 -> 216 -> 10 logits
model = tf.keras.Sequential([
    layers.Dense(512, activation="relu"),
    layers.Dense(216, activation="relu"),
    layers.Dense(10),
])

optimizer = optimizers.SGD(learning_rate=0.001)

def train_epoch(epoch):
    for step, (x, y) in enumerate(train_dataset):  # 300 steps per epoch
        with tf.GradientTape() as tape:
            x = tf.reshape(x, (-1, 28 * 28))  # flatten [b, 28, 28] -> [b, 784]
            out = model(x)
            loss = tf.reduce_sum(tf.square(out - y)) / x.shape[0]  # per-sample mean squared error

        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))

        if step % 100 == 0:
            print(epoch, step, "loss:", loss.numpy())


def train():
    # iterate over the full dataset for 30 epochs
    for epoch in range(30):
        train_epoch(epoch)


if __name__ == "__main__":
    train()
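The script loads the validation split (x_val, y_val) but never uses it. As a minimal sketch, an accuracy check could be appended after train() in the script above; it reuses the script's model, x_val, and y_val, and repeats the same preprocessing as the training data:

# Evaluate on the held-out MNIST split; assumes train() has already run.
x_val = tf.convert_to_tensor(x_val, dtype=tf.float32) / 255.  # same scaling as training
x_val = tf.reshape(x_val, (-1, 28 * 28))                      # flatten [10000, 28, 28] -> [10000, 784]
logits = model(x_val)                                         # raw class scores, shape [10000, 10]
pred = tf.argmax(logits, axis=1)                              # predicted digit per image
correct = tf.equal(pred, tf.cast(y_val, tf.int64))
print("val accuracy:", tf.reduce_mean(tf.cast(correct, tf.float32)).numpy())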



Original post: https://www.cnblogs.com/zach0812/p/13141345.html
