Implementing an Autoencoder with TensorFlow


1. Principle

A supervised neural network is essentially a process of repeatedly learning and extracting features, where each hidden layer can be read as a feature extraction of the layer before it. An autoencoder exploits exactly this intermediate representation: we map high-dimensional data through a few layers down to a low dimension, and then reconstruct the original data from that low-dimensional code.
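To make the data flow concrete, here is a minimal NumPy sketch of one forward pass through a 3 → 2 → 3 autoencoder (the weights are hypothetical random values; the TensorFlow model below learns them instead):

import numpy as np

def softplus(x):
    return np.log1p(np.exp(x))

W1, b1 = np.random.randn(3, 2), 0.0   # encoder: 3-D input -> 2-D code
W2, b2 = np.random.randn(2, 3), 0.0   # decoder: 2-D code -> 3-D reconstruction

x = np.random.randn(5, 3)             # 5 samples in 3-D
code = softplus(x @ W1 + b1)          # low-dimensional representation
recon = code @ W2 + b2                # reconstruction of the input
loss = np.mean((x - recon) ** 2)      # MSE, the training objective used below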

2. Test Data

Construct two 3-D spheres, map them down to 2-D, and inspect the resulting distribution.

def make_ball(r=2, a=0, b=0, c=0):
    points = []
    for i in range(400):
        t = np.random.choice(360)    # azimuthal angle, in degrees
        t2 = np.random.choice(180)   # polar angle, in degrees
        # r = np.random.random() * r  # uncomment to sample the interior instead of the surface
        x = r * np.sin(t2 * np.pi / 180) * np.cos(t * np.pi / 180) + a
        y = r * np.sin(t2 * np.pi / 180) * np.sin(t * np.pi / 180) + b
        z = r * np.cos(t2 * np.pi / 180) + c
        points.append([x, y, z])
    return np.array(points)
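As a quick sanity check (assuming NumPy is imported as np, as in the full code below), every generated point should lie at distance r from the center (a, b, c):

ball = make_ball(r=2, a=3, b=3, c=3)
dists = np.linalg.norm(ball - np.array([3, 3, 3]), axis=1)
print(dists.min(), dists.max())   # both should print 2.0 (surface sampling)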

3. Building the Model

Since the example data is simple, a single hidden layer is enough; that hidden layer is the low-dimensional representation we want to extract. The model is optimized with gradient descent, the loss is the mean squared error between the reconstructed data and the original data, and training is done once the model converges.

def build_autoencode(x):
    n, dim = x.shape
    x_input = tf.placeholder(dtype=tf.float32, shape=[None, dim])
    # encode: compress the input down to 2 dimensions
    with tf.name_scope("encode"):
        w, b = init_wb(shape=[dim, 2])
        encode = tf.nn.softplus(tf.matmul(x_input, w) + b)
    # decode: reconstruct the input from the 2-D code
    with tf.name_scope("decode"):
        w, b = init_wb(shape=[2, dim])
        decode = tf.matmul(encode, w) + b
    # loss: mean squared error between reconstruction and original input
    with tf.name_scope("loss"):
        loss = tf.reduce_mean(tf.square(tf.subtract(x_input, decode)))
    with tf.name_scope("train"):
        train_op = tf.train.GradientDescentOptimizer(1e-3).minimize(loss)
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    for i in range(10000):
        sess.run(train_op, feed_dict={x_input: x})
        if i % 100 == 0:
            loss_val = sess.run(loss, feed_dict={x_input: x})
            encode_val, decode_val = sess.run([encode, decode], feed_dict={x_input: x})
            print("iter:", i, "loss:", loss_val)
    return sess, encode, decode, x_input
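Plain gradient descent with a 1e-3 learning rate can converge slowly over 10,000 iterations. If the loss plateaus, one option (an untested variation, not part of the original code) is to swap in an adaptive optimizer such as tf.train.AdamOptimizer:

with tf.name_scope("train"):
    # hypothetical learning rate; tune as needed
    train_op = tf.train.AdamOptimizer(1e-2).minimize(loss)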

Full code:

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D

def make_ball(r=2, a=0, b=0, c=0):
    points = []
    for i in range(400):
        t = np.random.choice(360)    # azimuthal angle, in degrees
        t2 = np.random.choice(180)   # polar angle, in degrees
        x = r * np.sin(t2 * np.pi / 180) * np.cos(t * np.pi / 180) + a
        y = r * np.sin(t2 * np.pi / 180) * np.sin(t * np.pi / 180) + b
        z = r * np.cos(t2 * np.pi / 180) + c
        points.append([x, y, z])
    return np.array(points)

points = make_ball()
points2 = make_ball(r=2, a=3, b=3, c=3)
points3 = np.concatenate((points, points2), axis=0)

def plot3d(points):
    fig = plt.figure()
    ax = Axes3D(fig)
    ax.plot_trisurf(points[:, 0], points[:, 1], points[:, 2], cmap=cm.jet, linewidth=0.9)
    plt.show()

def plot2d(point):
    if len(point) > 400:
        # color the two balls separately
        plt.scatter(point[:400, 0], point[:400, 1])
        plt.scatter(point[400:, 0], point[400:, 1])
    else:
        plt.scatter(point[:, 0], point[:, 1])
    plt.show()

def init_wb(shape):
    w = tf.Variable(tf.truncated_normal(shape=shape, stddev=0.1), dtype=tf.float32)
    b = tf.Variable(0., dtype=tf.float32)
    return w, b

# Build the autoencoder
def build_autoencode(x):
    n, dim = x.shape
    x_input = tf.placeholder(dtype=tf.float32, shape=[None, dim])
    # encode: compress the input down to 2 dimensions
    with tf.name_scope("encode"):
        w, b = init_wb(shape=[dim, 2])
        encode = tf.nn.softplus(tf.matmul(x_input, w) + b)
    # decode: reconstruct the input from the 2-D code
    with tf.name_scope("decode"):
        w, b = init_wb(shape=[2, dim])
        decode = tf.matmul(encode, w) + b
    # loss: mean squared error between reconstruction and original input
    with tf.name_scope("loss"):
        loss = tf.reduce_mean(tf.square(tf.subtract(x_input, decode)))
    with tf.name_scope("train"):
        train_op = tf.train.GradientDescentOptimizer(1e-3).minimize(loss)
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    for i in range(10000):
        sess.run(train_op, feed_dict={x_input: x})
        if i % 100 == 0:
            loss_val = sess.run(loss, feed_dict={x_input: x})
            encode_val, decode_val = sess.run([encode, decode], feed_dict={x_input: x})
            print("iter:", i, "loss:", loss_val)
    return sess, encode, decode, x_input

sess, encode, decode, x_input = build_autoencode(points3)
dd = sess.run(encode, feed_dict={x_input: points3})   # 2-D codes for all 800 points
plot2d(dd)
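Note that this code uses the TensorFlow 1.x graph API (tf.placeholder, tf.Session, and so on). If you only have TensorFlow 2.x installed, one way to run it unchanged is the v1 compatibility shim, replacing the first import:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()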
