Implementing Logistic Regression from Scratch in Python

2021-10-21 14:20:58

Usage example:

plt.scatter(x[y == 0, 0], x[y == 0, 1], color="red")
plt.scatter(x[y == 1, 0], x[y == 1, 1], color="blue")
plt.show()

# Split the data set
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=666)

# Call our own logistic regression class
log_reg = logisticregression()
log_reg.fit(x_train, y_train)
print("final score is :%s" % log_reg.score(x_test, y_test))
print("actual prob is :")
print(log_reg.predict_proba(x_test))
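The snippet above assumes that x, y, plt, and train_test_split are already in scope. A minimal setup sketch, assuming a two-class, two-feature slice of the iris dataset (the dataset choice is an assumption for illustration, not part of the original post):

import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split

# Assumed data: restrict iris to its first two classes (so y is binary)
# and its first two features (so the scatter plot above works unchanged).
iris = datasets.load_iris()
x = iris.data[iris.target < 2, :2]
y = iris.target[iris.target < 2]

With this setup, x has shape (100, 2) and y contains only 0s and 1s, which is exactly what the plotting and fitting code expects.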

import numpy as np

from sklearn.metrics import accuracy_score

class logisticregression(object):
    def __init__(self):
        """Initialize the logistic regression model."""
        self.coef = None
        self.intercept = None
        self._theta = None

    def sigmoid(self, t):
        return 1. / (1. + np.exp(-t))

    def fit(self, x_train, y_train, eta=0.01, n_iters=1e4):
        """Train the logistic regression model with gradient descent."""
        assert x_train.shape[0] == y_train.shape[0], \
            "the size of x_train must be equal to the size of y_train"

        def j(theta, x_b, y):
            # Cross-entropy cost; falls back to inf if log(0) blows up
            y_hat = self.sigmoid(x_b.dot(theta))
            try:
                return -np.sum(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)) / len(y)
            except Exception:
                return float('inf')

        def dj(theta, x_b, y):
            # Vectorized form of the gradient
            return x_b.T.dot(self.sigmoid(x_b.dot(theta)) - y) / len(y)

        def gradient_descent(x_b, y, initial_theta, eta, n_iters=1e4, epsilon=1e-8):
            theta = initial_theta
            cur_iter = 0
            while cur_iter < n_iters:
                gradient = dj(theta, x_b, y)
                last_theta = theta
                theta = theta - eta * gradient
                # Stop early once the cost barely changes between iterations
                if abs(j(theta, x_b, y) - j(last_theta, x_b, y)) < epsilon:
                    break
                cur_iter += 1
            return theta

        # Prepend a column of ones so theta[0] acts as the intercept
        x_b = np.hstack([np.ones((len(x_train), 1)), x_train])
        initial_theta = np.zeros(x_b.shape[1])
        self._theta = gradient_descent(x_b, y_train, initial_theta, eta, n_iters)

        # Intercept
        self.intercept = self._theta[0]
        # Coefficients of the features x_i
        self.coef = self._theta[1:]
        return self

    def predict_proba(self, x_predict):
        """Given a data set x_predict, return the vector of predicted probabilities for x_predict."""
        assert self.intercept is not None and self.coef is not None, \
            "must fit before predict"
        assert x_predict.shape[1] == len(self.coef), \
            "the feature number of x_predict must be equal to x_train"
        x_b = np.hstack([np.ones((len(x_predict), 1)), x_predict])
        return self.sigmoid(x_b.dot(self._theta))

    def predict(self, x_predict):
        """Given a data set x_predict, return the vector of predicted labels for x_predict."""
        assert self.intercept is not None and self.coef is not None, \
            "must fit before predict!"
        assert x_predict.shape[1] == len(self.coef), \
            "the feature number of x_predict must be equal to x_train"
        prob = self.predict_proba(x_predict)
        return np.array(prob >= 0.5, dtype='int')

    def score(self, x_test, y_test):
        """Determine the accuracy of the current model on the test sets x_test and y_test."""
        y_predict = self.predict(x_test)
        return accuracy_score(y_test, y_predict)

    def __repr__(self):
        return "logisticregression()"
