import os

# TensorFlow 1.x style code (tf.Session, tf.placeholder, tf.app.flags).
# Under TF 2.x this would need `import tensorflow.compat.v1 as tf` plus
# `tf.disable_v2_behavior()`.
import tensorflow as tf

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'


def tensorflow_test():
    # a = 10
    # b = 20
    # c = a + b
    # print(c)
    con1 = tf.constant(10, name='a')
    con2 = tf.constant(20, name='b')
    sum_t = tf.add(con1, con2, name='sum')

    # Print the default graph
    # print(tf.get_default_graph())
    # print(con1.graph)
    # print(con2.graph)
    # print(sum_t.graph)
    # add(a, b)

    # Create a custom graph
    # new_g = tf.Graph()
    # with new_g.as_default():
    #     new_a = tf.constant(30)
    #     new_b = tf.constant(40)
    #     new_sum = tf.add(new_a, new_b)
    #     print(new_a.graph)
    #     print(new_b.graph)
    #     print(new_sum.graph)
    #     print(sum)
    # with tf.Session(graph=new_g) as sess:

    # Define placeholders so values can be fed in when the graph is run
    a = tf.placeholder(tf.float32)
    b = tf.placeholder(tf.float32)
    sum_ab = tf.add(a, b)

    print(con1)
    print(con2)
    print(sum_t)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=True)) as sess:
        # print(sess.run(sum_t))
        # resources of the new graph
        # print(sess.run(sum_t))
        print(sess.run([con1, con2, sum_t]))
        print(con1.eval())

        # Serialize the graph into an events file
        # file_writer = tf.summary.FileWriter("./tmp/summary/", graph=sess.graph)
        # print(sess.graph)

        # Run the placeholders with feed_dict
        print(sess.run([a, b, sum_ab], feed_dict={a: 3.0, b: 4.0}))

    return None


def shape_test():
    a_p = tf.placeholder(dtype=tf.float32, shape=[None, None])
    b_p = tf.placeholder(dtype=tf.float32, shape=[None, 10])
    c_p = tf.placeholder(dtype=tf.float32, shape=[3, 2])

    # Get the static shapes
    print("Static shape of a_p:\n", a_p.get_shape())
    print("Static shape of b_p:\n", b_p.get_shape())
    print("Static shape of c_p:\n", c_p.get_shape())

    # Modify the shapes of these tensors
    # Static modification: set_shape changes the shape of the tensor itself
    a_p.set_shape([10, 12])
    print("Static shape of a_p:\n", a_p.get_shape())
    b_p.set_shape([3, 10])
    print("Static shape of b_p:\n", b_p.get_shape())
    # c_p.set_shape([2, 3])
    # print("Static shape of c_p:\n", c_p.get_shape())
    # Once a shape is fully defined it can no longer be changed with set_shape;
    # where it is allowed, set_shape modifies the tensor's own shape.

    # Dynamic modification: tf.reshape creates a new tensor and leaves the original unchanged
    c_reshape = tf.reshape(c_p, [2, 3])
    print("Static shape of c_p:\n", c_p.get_shape())
    print("Static shape of c_reshape:\n", c_reshape.get_shape())
    # c_reshape = tf.reshape(c_p, [10, 3])


def variable_test():
    a = tf.Variable(initial_value=30.0)
    b = tf.Variable(initial_value=40.0)
    sum_t = tf.add(a, b)

    # Initialize variables
    init = tf.global_variables_initializer()
    print(a, b, sum_t)

    with tf.Session() as sess:
        sess.run(init)
        print(sess.run(sum_t))


tf.app.flags.DEFINE_integer('max_step', 0, "Number of training steps for the linear regression model")
FLAGS = tf.app.flags.FLAGS


def linear_regression():
    """
    Implement a linear regression training loop by hand on generated data.
    :return:
    """
    # 1. Prepare the dataset: y = 0.8x + 0.7, 100 samples
    # 100 samples, [100, 1] * [1, 1]
    with tf.variable_scope("original_data"):
        X = tf.random_normal([100, 1], mean=2, stddev=1, name='feature')
        y_true = tf.matmul(X, [[0.8]]) + 0.7

    # 2. Build the linear model
    # x [100, 1] * [1, 1], y [100]
    # y = wx + b, w [1, 1], b [1, 1]
    # Randomly initialize W and b
    # y = W·X + b; goal: learn the weight W and bias b
    # Trainable model parameters must be created with tf.Variable
    with tf.variable_scope("linear_model"):
        weights = tf.Variable(initial_value=tf.random_normal([1, 1]), name='w')
        bias = tf.Variable(initial_value=tf.random_normal([1, 1]), name='b')
        y_predict = tf.matmul(X, weights) + bias

    with tf.variable_scope("loss"):
        # 3. Define the loss (error between prediction and ground truth): mean squared error
        loss = tf.reduce_mean(tf.square(y_true - y_predict))

    # 4. Minimize the loss with gradient descent; the learning rate is a hyperparameter
    with tf.variable_scope("optimizer"):
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)

    # Explicit variable initialization
    init = tf.global_variables_initializer()

    # Collect tensors to observe in TensorBoard
    tf.summary.scalar('losses', loss)
    tf.summary.histogram('w', weights)
    tf.summary.histogram('b', bias)

    # Merge the summary tensors
    merge = tf.summary.merge_all()

    # Add a saver
    saver = tf.train.Saver()

    # Open a session and run the whole model
    with tf.Session() as sess:
        sess.run(init)

        # Train the linear regression model
        # Print the initial parameters before optimization
        print("Randomly initialized weight: %f, bias: %f" % (weights.eval(), bias.eval()))
        # At step 499: loss 0.000243, weight 0.813589, bias 0.667376

        # Write the graph to an events file
        file_writer = tf.summary.FileWriter("./tmp/summary/linear/", graph=sess.graph)

        # Restore a previously saved model and continue training from it
        # (assumes a checkpoint already exists at this path; comment out on a first run)
        saver.restore(sess, "./tmp/ckpt/linear/linear_regression.ckpt")

        for i in range(FLAGS.max_step):
            sess.run(optimizer)
            print("Step %d: loss %f, weight %f, bias %f" % (i, loss.eval(), weights.eval(), bias.eval()))

            # Run the merged summary tensors
            summary = sess.run(merge)
            file_writer.add_summary(summary, i)

            # Save the model
            saver.save(sess, "./tmp/ckpt/linear/linear_regression.ckpt")


if __name__ == '__main__':
    # tensorflow_test()
    # shape_test()
    # variable_test()
    linear_regression()
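
# ---------------------------------------------------------------------------
# Usage (a minimal sketch, not part of the original script): the file name
# linear_regression.py is an assumption; the training step count is passed via
# the tf.app.flags flag defined above, and TensorBoard can read the events
# files written to ./tmp/summary/linear/.
#
#   python linear_regression.py --max_step=500
#   tensorboard --logdir=./tmp/summary/linear/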