Implementing a Vanilla RNN in TensorFlow


# -*- coding: utf-8 -*-

"""
@author: zhangxianke
@file: test.py
@time: 2018/11/09
"""

from tensorflow.examples.tutorials.mnist import input_data

import tensorflow as tf

data = input_data.read_data_sets('./mnist_data', one_hot=True)

train_x, train_y = data.train.images, data.train.labels

test_x, test_y = data.test.images, data.test.labels
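
# train_x: (55000, 784) flattened 28x28 images scaled to [0, 1]; train_y: (55000, 10) one-hot labels
# test_x: (10000, 784); test_y: (10000, 10)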

sequence_length = 28  # number of time steps fed to the RNN (one per image row)

frame_size = 28  # elements per time step (pixels per row)

n_hidden = 100  # size of the hidden state

n_class = 10  # ten digit classes

w = tf.Variable(tf.random_normal([n_hidden, n_class]), dtype=tf.float32)

b = tf.Variable(tf.zeros([1, n_class]), dtype=tf.float32)

x = tf.placeholder(tf.float32, [None, sequence_length * frame_size])

y = tf.placeholder(tf.float32, [None, n_class])

def rnn(_x, _w, _b):
    # reshape flat 784-pixel rows into [batch, sequence_length, frame_size]
    _x = tf.reshape(_x, shape=[-1, sequence_length, frame_size])
    cell = tf.nn.rnn_cell.BasicRNNCell(n_hidden)  # vanilla RNN cell
    output, h = tf.nn.dynamic_rnn(cell, _x, dtype=tf.float32)
    # project the hidden state of the last time step to class logits;
    # softmax is applied inside the loss below, so applying tf.nn.softmax
    # here as well (as the original did) would squash the logits twice
    return tf.matmul(output[:, -1, :], _w) + _b
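
# For reference, BasicRNNCell implements the Elman recurrence
#   h_t = tanh(x_t @ W_xh + h_{t-1} @ W_hh + b).
# A standalone shape check (toy zero input in a throwaway graph; an
# illustrative sketch, not part of the original post):
with tf.Graph().as_default():
    toy_in = tf.zeros([4, sequence_length, frame_size])
    toy_out, toy_h = tf.nn.dynamic_rnn(
        tf.nn.rnn_cell.BasicRNNCell(n_hidden), toy_in, dtype=tf.float32)
    print(toy_out.shape)  # (4, 28, 100): one hidden vector per time step
    print(toy_h.shape)    # (4, 100): final state, equal to toy_out[:, -1, :]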

iters = 1000

batch = 1280

learn_rate = 0.001

y_predict = rnn(x, w, b)

loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_predict, labels=y))

optm = tf.train.AdamOptimizer(learn_rate).minimize(loss)

# per-example correctness, then mean accuracy over the batch
result = tf.equal(tf.argmax(y_predict, 1), tf.argmax(y, 1))

acc = tf.reduce_mean(tf.to_float(result))

# initialize the session and all variables
sess = tf.Session()

sess.run(tf.global_variables_initializer())

for i in range(iters):
    cost = count = 0.0
    for batch_index in range(0, train_y.shape[0], batch):
        count += 1
        # one mini-batch of flattened images and their one-hot labels
        feed = {x: train_x[batch_index:batch_index + batch],
                y: train_y[batch_index:batch_index + batch]}
        sess.run(optm, feed_dict=feed)
        cost += sess.run(loss, feed_dict=feed)
    cost /= count
    feed_test = {x: test_x, y: test_y}
    accuracy = sess.run(acc, feed_dict=feed_test)
    print('Iteration %d: loss %.6f, test accuracy %.6f' % (i + 1, cost, accuracy))
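
Once training finishes, the same session can classify individual test images. A minimal sketch (illustrative, not part of the original post):

# predicted digit vs. true digit for the first test image
pred = tf.argmax(y_predict, 1)
print(sess.run(pred, feed_dict={x: test_x[:1]})[0], test_y[0].argmax())
sess.close()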
