Deep Learning 3: Hand-Writing a Single-Hidden-Layer Neural Network

2021-10-04

# *******************************
# target: hand-write a neural network with a single hidden layer
# author: magic
# steps: 1. define the network structure (sizes of the input, hidden, and output layers)
#        2. initialize the model parameters
#        3. loop: run forward propagation / compute the loss / run backward propagation / update the weights
# *******************************

import numpy as np

# Define the network structure
def layer_sizes(x, y):
    n_x = x.shape[0]  # size of the input layer
    n_h = 4           # size of the hidden layer
    n_y = y.shape[0]  # size of the output layer
    return (n_x, n_h, n_y)
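Both x and y are laid out with one column per example, so shape[0] is the feature (or label) dimension. A minimal sanity check, assuming hypothetical toy shapes:

x_demo = np.random.randn(2, 400)            # 2 input features, 400 examples
y_demo = np.random.randint(0, 2, (1, 400))  # 1 binary label per example
print(layer_sizes(x_demo, y_demo))          # (2, 4, 1)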

# Initialize the model parameters
def initialize_parameters(n_x, n_h, n_y):
    w1 = np.random.randn(n_h, n_x) * 0.01
    b1 = np.zeros((n_h, 1))
    w2 = np.random.randn(n_y, n_h) * 0.01
    b2 = np.zeros((n_y, 1))

    assert w1.shape == (n_h, n_x)
    assert b1.shape == (n_h, 1)
    assert w2.shape == (n_y, n_h)
    assert b2.shape == (n_y, 1)

    parameters = {'w1': w1, 'b1': b1, 'w2': w2, 'b2': b2}
    return parameters
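Scaling the random weights by 0.01 breaks the symmetry between hidden units while keeping the initial tanh inputs in their near-linear region, where gradients are largest; the biases can safely start at zero because the random weights already make each unit different.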

# Forward propagation
def forward_propagation(x, parameters):
    w1 = parameters['w1']
    b1 = parameters['b1']
    w2 = parameters['w2']
    b2 = parameters['b2']

    z1 = np.dot(w1, x) + b1
    a1 = np.tanh(z1)
    z2 = np.dot(w2, a1) + b2    # the hidden activation a1, not z1, feeds the output layer
    a2 = 1 / (1 + np.exp(-z2))  # sigmoid output in plain numpy
    assert a2.shape == (1, x.shape[1])

    cache = {'z1': z1, 'a1': a1, 'z2': z2, 'a2': a2}
    return a2, cache
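Written out, with one column per example and m examples in total, the forward pass computes:

    z1 = w1·x + b1    # shape (n_h, m)
    a1 = tanh(z1)
    z2 = w2·a1 + b2   # shape (n_y, m)
    a2 = σ(z2) = 1 / (1 + exp(-z2))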

# Compute the cross-entropy cost
def compute_cost(a2, y, parameters):
    m = y.shape[1]
    logprobs = np.multiply(np.log(a2), y) + np.multiply(np.log(1 - a2), 1 - y)
    cost = -1 / m * np.sum(logprobs)
    cost = float(np.squeeze(cost))  # reduce to a plain Python scalar
    assert isinstance(cost, float)
    return cost
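This is the averaged binary cross-entropy, J = -(1/m) Σ [ y·log(a2) + (1-y)·log(1-a2) ], which heavily penalizes confident wrong predictions.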

# Backward propagation
def backward_propagation(parameters, cache, x, y):
    m = x.shape[1]
    w1 = parameters['w1']
    w2 = parameters['w2']
    a1 = cache['a1']
    a2 = cache['a2']

    dz2 = a2 - y
    dw2 = 1 / m * np.dot(dz2, a1.T)
    db2 = 1 / m * np.sum(dz2, axis=1, keepdims=True)
    dz1 = np.dot(w2.T, dz2) * (1 - np.power(a1, 2))
    dw1 = 1 / m * np.dot(dz1, x.T)
    db1 = 1 / m * np.sum(dz1, axis=1, keepdims=True)

    grads = {'dw1': dw1, 'db1': db1, 'dw2': dw2, 'db2': db2}
    return grads
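The compact dz2 = a2 - y is exact, not an approximation: for a sigmoid output trained with cross-entropy, the derivative of the cost with respect to z2 collapses to a2 - y. Likewise, 1 - np.power(a1, 2) is the derivative of tanh, since tanh'(z) = 1 - tanh(z)².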

# Update the weights with gradient descent
def update_parameters(parameters, grads, learning_rate=1.2):
    w1 = parameters['w1']
    b1 = parameters['b1']
    w2 = parameters['w2']
    b2 = parameters['b2']
    dw1 = grads['dw1']
    db1 = grads['db1']
    dw2 = grads['dw2']
    db2 = grads['db2']

    w1 -= dw1 * learning_rate
    b1 -= db1 * learning_rate
    w2 -= dw2 * learning_rate
    b2 -= db2 * learning_rate

    parameters = {'w1': w1, 'b1': b1, 'w2': w2, 'b2': b2}
    return parameters
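Each parameter follows the plain gradient-descent rule θ ← θ - α·∂J/∂θ, with α = learning_rate. A rate of 1.2 is on the large side but workable for a small tanh network like this one; on other data it may need tuning downward.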

# Wrap everything into one model
def nn_model(x, y, n_h, num_iterations=10000, print_cost=False):
    np.random.seed(3)
    n_x = layer_sizes(x, y)[0]
    n_y = layer_sizes(x, y)[2]

    parameters = initialize_parameters(n_x, n_h, n_y)
    w1 = parameters['w1']
    b1 = parameters['b1']
    w2 = parameters['w2']
    b2 = parameters['b2']

    for i in range(0, num_iterations):
        a2, cache = forward_propagation(x, parameters)
        cost = compute_cost(a2, y, parameters)
        grads = backward_propagation(parameters, cache, x, y)
        parameters = update_parameters(parameters, grads, learning_rate=1.2)
        if print_cost and i % 1000 == 0:
            print('cost after iteration %i: %f' % (i, cost))
    return parameters
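To see the whole pipeline run end to end, here is a minimal usage sketch; the toy dataset, the predict helper, and the 0.5 decision threshold are assumptions added for illustration, not part of the original post.

def predict(parameters, x):
    # Hypothetical helper: threshold the sigmoid output at 0.5.
    a2, _ = forward_propagation(x, parameters)
    return (a2 > 0.5).astype(int)

np.random.seed(1)
x_train = np.random.randn(2, 400)  # toy data: 2 features, 400 examples
y_train = (x_train[0, :] * x_train[1, :] > 0).astype(int).reshape(1, 400)  # label: features share a sign

parameters = nn_model(x_train, y_train, n_h=4, num_iterations=10000, print_cost=True)
predictions = predict(parameters, x_train)
print('train accuracy: %.2f%%' % (100 * np.mean(predictions == y_train)))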
