import tensorflow as tf
import numpy as np

# 1-1 A first look at TensorFlow sessions
###########################
# Defining a session with `with`:
# a = tf.constant([[1,2]])
# b = tf.constant([[3],[5]])
# c = tf.multiply(a,b)
# d = tf.matmul(a,b)
# with tf.Session() as sess:
#     a1 = sess.run(a)
#     b1 = sess.run(b)
#     d1 = sess.run(d)
#     print(a1)
#     print(b1)
#     print(d1)
#
# Defining a session the ordinary way (remember to close it):
# sess = tf.Session()
# a1 = sess.run(a)
# b1 = sess.run(b)
# c1 = sess.run(c)
# d1 = sess.run(d)
# print(a1)
# print(b1)
# print(c1)
# print(d1)
# sess.close()

# 2-2 Variables (must be initialized before use)
###########################
# a = tf.Variable([1,2])
# b = tf.constant([5,8])
# c = tf.subtract(a,b)  # subtraction
# d = tf.add(a,b)       # addition
#
# Incrementing a variable by 1 via assignment:
# e = tf.Variable(5)
# newe = tf.add(e,1)
# update = tf.assign(e,newe)
#
# init = tf.global_variables_initializer()  # required whenever variables exist
# with tf.Session() as sess:
#     sess.run(init)
#     # print(sess.run(a))
#     # print(sess.run(b))
#     e1 = sess.run(e)
#     for i in range(5):
#         sess.run(update)
#         print(sess.run(e))

# 2-3 Fetch and Feed
########################
# Fetch: run several ops in a single sess.run call.
# a = tf.constant(1)
# b = tf.constant(2)
# c = tf.add(a,b)
# d = tf.multiply(b,c)
# with tf.Session() as sess:
#     result = sess.run([d,c])
#     print(result)
#
# Feed: supply placeholder values at run time via feed_dict.
# a = tf.placeholder(tf.float32)
# b = tf.placeholder(tf.float32)
# output = tf.multiply(a,b)
# with tf.Session() as sess:
#     result = sess.run(output,feed_dict={a:25,b:5})
#     print(result)

# 2-4 A simple end-to-end example: fit y = 5x + 3 by gradient descent
######################

# Synthetic training data: 100 points on the line y = 5x + 3.
x_data = np.random.rand(100)
y_data = 5 * x_data + 3

# Trainable parameters (slope and intercept), both starting at 0.0;
# the model prediction is a simple linear function of x_data.
k = tf.Variable(0.)
b = tf.Variable(0.)
y = k * x_data + b

# Cost: mean squared error between prediction and target, minimized
# with plain gradient descent (learning rate 0.3).
loss = tf.reduce_mean(tf.square(y - y_data))
train = tf.train.GradientDescentOptimizer(0.3).minimize(loss)

with tf.Session() as sess:
    # Variables must be initialized before any sess.run on them.
    sess.run(tf.global_variables_initializer())
    # 200 training steps; report the fitted (k, b) every 20 steps —
    # they should converge toward (5, 3).
    for step in range(200):
        sess.run(train)
        if step % 20 == 0:
            print(step, sess.run([k, b]))
TensorFlow 1.x basics: sessions, variables, fetch and feed, and a linear-regression example.
Original article: https://www.cnblogs.com/cxhzy/p/10862259.html