2019-08-06 Learning Notes


# TensorFlow implementation of the backpropagation algorithm (23 columns ** 11 columns)

import tensorflow as tf
import numpy as np
from sklearn import preprocessing

with open (r"e:\神經網路\bp神經網路\test\qmatrix.dat__irp.dat") as feature:

feature_list = feature.readlines()

feature_list = [i.strip('\n')for i in feature_list]

feature_list = [i.replace(' ','') for i in feature_list]

myfeature=

for i in feature_list:

j = list(i)

numj =

for num in j:

arrayfeature = np.array(myfeature)
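
A quick shape check (hypothetical; the counts 720 and 23 follow from the range(720) loop and n_input = 23 further down):

print(arrayfeature.shape)   # expected: (720, 23) — one row per pattern, one column per input bit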

with open (r"e:\神經網路\bp神經網路\test\qmatrix.dat_a.dat_amp.dat") as score:

score1 = score.readlines()

score_list = [i.strip('\n') for i in score1]

score_list = [i.replace(' ','') for i in score_list]

any_score =

for j in range(11):

first_score =

for i in score_list:

arrayany_score=np.array(any_score)

arrayany_score1 =

enc = preprocessing.onehotencoder()

enc.fit([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
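
To see why the targets are later sliced out in steps of two (np.linspace(0, 20, 11) gives offsets 0, 2, ..., 20), here is a minimal sketch of what the fitted encoder produces; each of the 11 binary score columns expands into a (value-0, value-1) pair of one-hot columns:

sample = [[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]   # hypothetical scores for one pattern
print(enc.transform(sample).toarray())
# [[0. 1. 1. 0. 1. 0. 1. 0. 1. 0. 1. 0. 1. 0. 1. 0. 1. 0. 1. 0. 1. 0.]]
# 22 columns = 11 pairs; columns nw:nw+2 hold the 2-class target for column n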

for nn in range(720):
    # (assumed) one-hot encode the 11 score bits of pattern nn into 22 values
    arrayany_score1.append(enc.transform([arrayany_score[:, nn]]).toarray()[0])

arrayany_score = np.array(arrayany_score1)   # now shape (720, 22)
del arrayany_score1

n_input = 23        # input bits per pattern
n_classes = 2       # each score column is a binary (one-hot) target
max_epochs = 10000
learning_rate = 0.15
#batch_size = 1
seed = 0
n_hidden = 11       # hidden units

training_set_inputs = arrayfeature

def sigmaprime(x):
    # derivative of the sigmoid, expressed through the sigmoid itself
    return tf.multiply(tf.sigmoid(x), tf.subtract(tf.constant(1.0), tf.sigmoid(x)))
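
For reference, sigmaprime is the closed-form derivative of the logistic sigmoid, which lets backpropagation reuse the forward-pass activations instead of recomputing anything:

$$\sigma(x) = \frac{1}{1 + e^{-x}}, \qquad \sigma'(x) = \sigma(x)\bigl(1 - \sigma(x)\bigr)$$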

x_in = tf.placeholder(tf.float32, [None, n_input])   # None lets the first dimension, the batch size, be of any size; the original comment quotes the MNIST tutorial, where a single flattened 28x28-pixel image gives a [None, 784] shape
y = tf.placeholder(tf.float32, [None, n_classes])

def multilayer_perceptron(x, weights, biases):
    # hidden layer: affine transform followed by a sigmoid
    h_layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['h1'])
    out_layer_1 = tf.sigmoid(h_layer_1)
    # output layer; the pre-activation values are also returned because the
    # manual backpropagation below needs them
    h_out = tf.matmul(out_layer_1, weights['out']) + biases['out']
    return tf.sigmoid(h_out), h_out, out_layer_1, h_layer_1

# (assumed) variable dictionaries; the shapes follow from how they are used below
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden], seed=seed)),
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes], seed=seed))
}
biases = {
    'h1': tf.Variable(tf.random_normal([1, n_hidden], seed=seed)),
    'out': tf.Variable(tf.random_normal([1, n_classes], seed=seed))
}

y_hat, h_2, o_1, h_1 = multilayer_perceptron(x_in, weights, biases)

# manual backpropagation: output-layer error and weight gradient
err = y_hat - y
delta_2 = tf.multiply(err, sigmaprime(h_2))
delta_w_2 = tf.matmul(tf.transpose(o_1), delta_2)

# propagate the error back through the output weights to the hidden layer
wtd_error = tf.matmul(delta_2, tf.transpose(weights['out']))
delta_1 = tf.multiply(wtd_error, sigmaprime(h_1))
delta_w_1 = tf.matmul(tf.transpose(x_in), delta_1)

eta = tf.constant(learning_rate)

# gradient-descent updates, applied as explicit assignments
step = [
    tf.assign(weights['h1'], tf.subtract(weights['h1'], tf.multiply(eta, delta_w_1))),
    tf.assign(biases['h1'], tf.subtract(biases['h1'], tf.multiply(eta, tf.reduce_mean(delta_1, axis=[0])))),
    tf.assign(weights['out'], tf.subtract(weights['out'], tf.multiply(eta, delta_w_2))),
    tf.assign(biases['out'], tf.subtract(biases['out'], tf.multiply(eta, tf.reduce_mean(delta_2, axis=[0]))))
]
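
Written out, the graph above implements the classic delta rule for a two-layer sigmoid network, using the same names as the code ($\odot$ is element-wise multiplication):

$$\delta_2 = (\hat{y} - y) \odot \sigma'(h_2), \qquad \Delta W_{\mathrm{out}} = o_1^{\top}\,\delta_2$$

$$\delta_1 = \bigl(\delta_2\, W_{\mathrm{out}}^{\top}\bigr) \odot \sigma'(h_1), \qquad \Delta W_{h1} = x^{\top}\,\delta_1$$

$$W \leftarrow W - \eta\,\Delta W, \qquad b \leftarrow b - \eta\,\bar{\delta}$$

where $\bar{\delta}$ is the batch mean of the corresponding delta, matching the tf.reduce_mean(..., axis=[0]) calls in step.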

acct_mat = tf.equal(tf.argmax(y_hat, 1), tf.argmax(y, 1))   # tf.argmax(input, axis) returns the index of the largest value along each row or column depending on axis; 1 means the row-wise maximum
accuracy = tf.reduce_sum(tf.cast(acct_mat, tf.float32))     # counts correct predictions; dividing by the pattern count gives a rate

init = tf.global_variables_initializer()

## (commented-out MNIST training loop from the tutorial this code follows;
## the feed_dict bodies were eaten by the blog engine and are assumed here)
#with tf.Session() as sess:
#    sess.run(init)
#    for epoch in range(max_epochs):
#        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
#        sess.run(step, feed_dict={x_in: batch_xs, y: batch_ys})
#        if epoch % 1000 == 0:
#            acc_test = sess.run(accuracy, feed_dict={x_in: mnist.test.images, y: mnist.test.labels})   # accuracy.eval(...) is equivalent to sess.run(accuracy, ...)
#            acc_train = sess.run(accuracy, feed_dict={x_in: mnist.train.images, y: mnist.train.labels})
#            print('epoch: {} accuracy train%: {} accuracy test%: {}'.format(epoch, acc_train/600, acc_test/100))
#    print(sess.run(y_hat, feed_dict={x_in: mnist.test.images}))
#    print(sess.run(tf.argmax(y_hat, 1), feed_dict={x_in: mnist.test.images}))

# one 2-class network per score column: nw = 0, 2, ..., 20 is the offset of
# column n's one-hot pair inside the 22-column target matrix
for n, nw in enumerate(np.linspace(0, 20, 11, dtype=int)):
    with tf.Session() as sess:
        sess.run(init)
        for epoch in range(max_epochs):
            # for record, target in zip(training_set_inputs, np.array([arrayany_score]).T):
            sess.run(step, feed_dict={x_in: arrayfeature, y: arrayany_score[:, nw:nw + 2]})   # (assumed) full-batch training on all 720 patterns
        acc = sess.run(accuracy, feed_dict={x_in: arrayfeature, y: arrayany_score[:, nw:nw + 2]})
        print('column {} accuracy%: {}'.format(n, acc / 720))
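
To also inspect the per-pattern predictions for a column, a line like the following could go inside the same Session block, right after the accuracy evaluation (a hypothetical addition, mirroring the commented-out print statements above):

pred = sess.run(tf.argmax(y_hat, 1), feed_dict={x_in: arrayfeature})   # 0/1 prediction for each of the 720 patterns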
