# -*- coding: utf-8 -*-
"""
Created on Mon Jun 8 20:42:57 2020
@author: hkx
"""
# Getting started with MNIST.
# =============================================================================
"""--------------------模板导入----------------------------------------------"""
import tensorflow.compat.v1 as tf
import input_data  # expects the MNIST loader script input_data.py (from the classic TensorFlow tutorials) alongside this file
#import tensorflow as tf
"""--------------------数据加载----------------------------------------------"""
‘读取mnist数据‘
mnist = input_data.read_data_sets("D:/MNIST_DATA/", one_hot=True)
# Disable eager execution so that tensors are only evaluated when run inside a Session
tf.disable_eager_execution()
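# Optional sanity check (a sketch, assuming the standard split produced by
# input_data: 55000 training and 10000 test examples, each image flattened to
# 784 pixels and each label one-hot encoded over 10 classes).
print(mnist.train.images.shape, mnist.train.labels.shape)  # (55000, 784) (55000, 10)
print(mnist.test.images.shape, mnist.test.labels.shape)    # (10000, 784) (10000, 10)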
"""--------------------线性回归模型 -----------------------------------------"""
‘权重‘
W = tf.Variable(tf.zeros([784,10]))
# Bias
b = tf.Variable(tf.zeros([10]))
# Placeholder for the input images, each flattened to 28*28 = 784 pixels
x = tf.placeholder("float", [None, 784])
# Matrix multiply: x (a batch of images) times W gives the raw class scores
Wx=tf.matmul(x,W)
# Linear scores Wx + b, pushed through softmax; y is the predicted probability distribution
y = tf.nn.softmax(Wx + b)
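# Softmax turns the 10 raw scores per image into a probability distribution:
# softmax(z)_i = exp(z_i) / sum_j exp(z_j), so each row of y is non-negative
# and sums to 1.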
"""--------------------训练模型----------------------------------------------"""
‘y_是实际的分布‘
y_ = tf.placeholder("float", [None,10])
# Cross-entropy loss; tf.reduce_sum sums a tensor over the given dimensions, reducing its rank
cross_entropy = -tf.reduce_sum(y_*tf.log(y))
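# A numerically more stable variant (a sketch; `stable_cross_entropy` is just an
# illustrative name and is not used by the training step below): feed the raw
# logits Wx + b to tf.nn.softmax_cross_entropy_with_logits_v2 instead of taking
# log(softmax(...)) by hand, then average over the batch.
stable_cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=Wx + b))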
# Minimize the cross-entropy with gradient descent, learning rate 0.01
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
# Op that initializes the variables we created
init = tf.global_variables_initializer()
# Launch the model in a Session
sess = tf.Session()
# ... and run the variable initializer
sess.run(init)
# Train the model: 1000 steps, each on a mini-batch of 100 examples
for i in range(1000):
    # Fetch the next mini-batch and run one gradient-descent step on it
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
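    # Optional (a sketch): print the loss on the current batch every 100 steps
    # to watch training converge; this only adds an extra sess.run call.
    if i % 100 == 0:
        batch_loss = sess.run(cross_entropy, feed_dict={x: batch_xs, y_: batch_ys})
        print("step %d, batch cross-entropy %.2f" % (i, batch_loss))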
"""--------------------评估模型----------------------------------------------"""
‘tf.argmax给出某个tensor对象在某一维上的其数据最大值所在的索引值‘
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
# Accuracy: cast the booleans to floats and take the mean
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Print the accuracy on the test set
print(sess.run(accuracy, feed_dict={
x: mnist.test.images, y_: mnist.test.labels}))
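# This plain softmax regression model typically reaches roughly 91-92% accuracy
# on the MNIST test set. Release the session's resources once finished.
sess.close()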
# Source: https://www.cnblogs.com/ygxxx8060/p/13069647.html