TensorFlow Gradient Descent


import tensorflow as tf

# model parameters
w = tf.Variable([0], dtype=tf.float32)  # initial value and dtype of w
b = tf.Variable([0], dtype=tf.float32)  # initial value and dtype of b

# model input and output
x = tf.placeholder(tf.float32)  # dtype of x
linear_model = w * x + b
y = tf.placeholder(tf.float32)  # dtype of y

# loss: sum of the squared errors
loss = tf.reduce_sum(tf.square(linear_model - y))

# optimizer: plain gradient descent with learning rate 0.001
optimizer = tf.train.GradientDescentOptimizer(0.001)
train = optimizer.minimize(loss)

# training data
x_train = [1, 2, 3, 4, 5]
y_train = [0, -1, -2, -3, -4]

# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)  # reset values to wrong
for i in range(1000):  # number of iterations
    sess.run(train, {x: x_train, y: y_train})

# evaluate training accuracy
curr_w, curr_b, curr_loss = sess.run([w, b, loss], {x: x_train, y: y_train})
print("w: %s b: %s loss: %s" % (curr_w, curr_b, curr_loss))
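For reference, the loss defined above and the parameter update that tf.train.GradientDescentOptimizer applies on each sess.run(train, ...) step can be written as:

$$J(w, b) = \sum_{i} \left( w x_i + b - y_i \right)^2$$

$$w \leftarrow w - \eta \,\frac{\partial J}{\partial w}, \qquad b \leftarrow b - \eta \,\frac{\partial J}{\partial b}, \qquad \eta = 0.001$$

Since the training data satisfy y = -x + 1 exactly, the loop should drive w toward -1 and b toward 1, with the loss approaching 0.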

If you want to add more variables and the like, just declare them at the top in the same way and then use them below; it is no extra trouble. For example:

import tensorflow as tf

# model parameters
w1 = tf.Variable([0], dtype=tf.float32)
w2 = tf.Variable([0], dtype=tf.float32)
b = tf.Variable([0], dtype=tf.float32)

# model input and output
x1 = tf.placeholder(tf.float32)
x2 = tf.placeholder(tf.float32)
linear_model = w1*x1 + w2*x2 + b
y = tf.placeholder(tf.float32)

# loss: sum of the squared errors
loss = tf.reduce_sum(tf.square(linear_model - y))

# optimizer
optimizer = tf.train.GradientDescentOptimizer(0.001)
train = optimizer.minimize(loss)

# training data
x1_train = [1, 2, 3, 4, 5]
x2_train = [0, 2, 1, 3, 6]
y_train = [1, 4, 4, 7, 11]

# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)  # reset values to wrong
for i in range(1000):
    sess.run(train, {x1: x1_train, x2: x2_train, y: y_train})

# evaluate training accuracy
curr_w1, curr_w2, curr_b, curr_loss = sess.run(
    [w1, w2, b, loss], {x1: x1_train, x2: x2_train, y: y_train})
print("w1: %s w2: %s b: %s loss: %s" % (curr_w1, curr_w2, curr_b, curr_loss))
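Here the training data satisfy y = x1 + x2 exactly, so training should push w1 and w2 toward 1 and b toward 0. As a quick sanity check (a small usage sketch, not part of the original post), the trained model can be queried at a new input while the session is still open:

# continuing the example above: evaluate the fitted model at a new point
pred = sess.run(linear_model, {x1: [2.0], x2: [3.0]})
print("prediction for x1=2, x2=3: %s" % pred)  # should be close to 5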

The results after running:

Note that, possibly depending on your computer's configuration, training becomes noticeably slow once the iteration count is large.
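To see how iteration count affects runtime on a given machine, one option (a minimal sketch, assuming the sess, train, x, y, x_train, and y_train from the examples above are still in scope) is to time the training loop with Python's standard time module:

import time

start = time.perf_counter()
for i in range(1000):
    sess.run(train, {x: x_train, y: y_train})
print("1000 training steps took %.3f s" % (time.perf_counter() - start))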

Taking the code at the beginning as an example, import the matplotlib package and then call some of its plotting functions.

import matplotlib.pyplot as plt  # import the matplotlib package
import tensorflow as tf

# model parameters
w = tf.Variable([0], dtype=tf.float32)  # initial value and dtype of w
b = tf.Variable([0], dtype=tf.float32)  # initial value and dtype of b

# model input and output
x = tf.placeholder(tf.float32)  # dtype of x
linear_model = w * x + b
y = tf.placeholder(tf.float32)  # dtype of y

# loss: sum of the squared errors
loss = tf.reduce_sum(tf.square(linear_model - y))

# optimizer
optimizer = tf.train.GradientDescentOptimizer(0.001)
train = optimizer.minimize(loss)

# training data
x_train = [1, 2, 3, 4, 5]
y_train = [0, -1, -2, -3, -4]

# training loop
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)  # reset values to wrong
for i in range(1000):  # number of iterations
    sess.run(train, {x: x_train, y: y_train})

# evaluate training accuracy
curr_w, curr_b, curr_loss = sess.run([w, b, loss], {x: x_train, y: y_train})
print("w: %s b: %s loss: %s" % (curr_w, curr_b, curr_loss))

# build the fitted line from the learned slope and intercept
line_values = [curr_w * i + curr_b for i in x_train]
plt.scatter(x_train, y_train, color='r')  # plot the (x, y) training points in red
plt.plot(x_train, line_values, 'b')       # draw the fitted line in blue
plt.show()                                # display the figure
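Incidentally, the tf.placeholder / tf.Session style used throughout this post is the TensorFlow 1.x API. On TensorFlow 2.x (eager execution), the first example could be sketched roughly as follows; this is an assumed equivalent for comparison, not code from the original post:

import tensorflow as tf

# model parameters
w = tf.Variable([0.0])
b = tf.Variable([0.0])

# training data
x_train = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0])
y_train = tf.constant([0.0, -1.0, -2.0, -3.0, -4.0])

# plain gradient descent, same learning rate as above
optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)

for i in range(1000):
    with tf.GradientTape() as tape:
        loss = tf.reduce_sum(tf.square(w * x_train + b - y_train))
    grads = tape.gradient(loss, [w, b])            # d(loss)/dw, d(loss)/db
    optimizer.apply_gradients(zip(grads, [w, b]))  # w -= lr * grad, etc.

print("w: %s b: %s loss: %s" % (w.numpy(), b.numpy(), loss.numpy()))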
