Implementing multiple linear regression in Python with gradient descent



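The code below runs batch gradient descent for a linear model with four features (at, v, ap, rh) and one target (pe), all held in a pandas DataFrame named pga. The original post does not show the imports or how the data is loaded, so the following setup is only a sketch; the CSV file name is an assumption, not from the original.

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Hypothetical load step: the file name/path is assumed for illustration.
# The frame is expected to contain the columns at, v, ap, rh and pe.
pga = pd.read_csv("pga.csv")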

# Normalize each column to zero mean and unit variance: (x - mean) / std.
# Bracket indexing is used because "at" collides with pandas' built-in DataFrame.at accessor.
pga["at"] = (pga["at"] - pga["at"].mean()) / pga["at"].std()
pga["v"] = (pga["v"] - pga["v"].mean()) / pga["v"].std()
pga["ap"] = (pga["ap"] - pga["ap"].mean()) / pga["ap"].std()
pga["rh"] = (pga["rh"] - pga["rh"].mean()) / pga["rh"].std()
pga["pe"] = (pga["pe"] - pga["pe"].mean()) / pga["pe"].std()

def cost(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y):
    # Mean squared error cost: J = 1/(2m) * sum_i (h_i - y_i)^2
    j = 0
    m = len(x1)
    for i in range(m):
        h = theta0 + x1[i]*theta1 + x2[i]*theta2 + x3[i]*theta3 + x4[i]*theta4
        j += (h - y[i])**2
    j /= (2*m)
    return j
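For reference, the same cost can be computed without the explicit Python loop by operating on the whole Series at once; this vectorized variant is a sketch and not part of the original post:

def cost_vectorized(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y):
    # Identical J, computed with pandas/NumPy broadcasting instead of an index loop.
    h = theta0 + x1*theta1 + x2*theta2 + x3*theta3 + x4*theta4
    return ((h - y) ** 2).sum() / (2 * len(x1))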

def partial_cost_theta4(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y):
    # dJ/dtheta4 = 1/m * sum((h - y) * x4)
    h = theta0 + x1*theta1 + x2*theta2 + x3*theta3 + x4*theta4
    diff = (h - y) * x4
    partial = diff.sum() / x2.shape[0]  # m: every column has the same length
    return partial

def partial_cost_theta3(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y):
    # dJ/dtheta3 = 1/m * sum((h - y) * x3)
    h = theta0 + x1*theta1 + x2*theta2 + x3*theta3 + x4*theta4
    diff = (h - y) * x3
    partial = diff.sum() / x2.shape[0]
    return partial

def partial_cost_theta2(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y):
    # dJ/dtheta2 = 1/m * sum((h - y) * x2)
    h = theta0 + x1*theta1 + x2*theta2 + x3*theta3 + x4*theta4
    diff = (h - y) * x2
    partial = diff.sum() / x2.shape[0]
    return partial

def partial_cost_theta1(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y):
    # dJ/dtheta1 = 1/m * sum((h - y) * x1)
    h = theta0 + x1*theta1 + x2*theta2 + x3*theta3 + x4*theta4
    diff = (h - y) * x1
    partial = diff.sum() / x2.shape[0]
    return partial

def partial_cost_theta0(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y):
    # dJ/dtheta0 (bias term) = 1/m * sum(h - y)
    h = theta0 + x1*theta1 + x2*theta2 + x3*theta3 + x4*theta4
    diff = h - y
    partial = diff.sum() / x2.shape[0]
    return partial
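The five partial derivatives differ only in which column multiplies the residual, so they could be folded into one helper. A possible refactor is sketched below; this function is not in the original post:

def partial_cost(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y, xj=None):
    # Gradient component 1/m * sum((h - y) * xj); pass xj=None for the bias term theta0.
    h = theta0 + x1*theta1 + x2*theta2 + x3*theta3 + x4*theta4
    diff = (h - y) if xj is None else (h - y) * xj
    return diff.sum() / len(y)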

def gradient_descent(x1, x2, x3, x4, y, alpha=0.1, theta0=0, theta1=0, theta2=0, theta3=0, theta4=0):
    max_epochs = 1000             # maximum number of iterations
    counter = 0                   # current iteration
    c = cost(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y)  # initial cost
    costs = [c]                   # record the cost after every update
    convergence_thres = 0.000001  # stop once the cost changes by less than this
    cprev = c + 10
    theta0s = [theta0]
    theta1s = [theta1]
    theta2s = [theta2]
    theta3s = [theta3]
    theta4s = [theta4]
    while (np.abs(cprev - c) > convergence_thres) and (counter < max_epochs):
        cprev = c
        # Each update is the learning rate times the partial derivative for that parameter,
        # all computed from the current thetas before any of them is changed.
        update0 = alpha * partial_cost_theta0(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y)
        update1 = alpha * partial_cost_theta1(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y)
        update2 = alpha * partial_cost_theta2(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y)
        update3 = alpha * partial_cost_theta3(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y)
        update4 = alpha * partial_cost_theta4(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y)
        theta0 -= update0
        theta1 -= update1
        theta2 -= update2
        theta3 -= update3
        theta4 -= update4
        theta0s.append(theta0)
        theta1s.append(theta1)
        theta2s.append(theta2)
        theta3s.append(theta3)
        theta4s.append(theta4)
        c = cost(theta0, theta1, theta2, theta3, theta4, x1, x2, x3, x4, y)
        costs.append(c)
        counter += 1
    # Return the fitted parameters and the cost history so callers can plot convergence.
    return {'theta0': theta0, 'theta1': theta1, 'theta2': theta2,
            'theta3': theta3, 'theta4': theta4, 'costs': costs}

print("costs =", gradient_descent(pga["at"], pga["v"], pga["ap"], pga["rh"], pga["pe"])["costs"])

descend = gradient_descent(pga["at"], pga["v"], pga["ap"], pga["rh"], pga["pe"], alpha=.01)

plt.scatter(range(len(descend[「costs」])), descend[「costs」])

plt.show()
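As a quick sanity check, not part of the original post, the parameters found by gradient descent can be compared with the closed-form least-squares solution from NumPy:

# Design matrix with a leading column of ones for the intercept term.
X = np.column_stack([np.ones(len(pga)), pga["at"], pga["v"], pga["ap"], pga["rh"]])
theta_ls, *_ = np.linalg.lstsq(X, pga["pe"], rcond=None)
print("closed-form thetas:     ", theta_ls)
print("gradient-descent thetas:",
      [descend[k] for k in ("theta0", "theta1", "theta2", "theta3", "theta4")])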

Plot of the loss function against the number of iterations (figure from the original post):
