簡易遷移學習

2021-09-28 15:02:23 字數 4781 閱讀 3281

這裡利用官方遷移學習代碼, 用它的inception-v3模型來進行自己改一個簡單的遷移學習代碼

代碼:

以下是自己改的簡易代碼

資料集為10類,有官方的5類花,還有前面代碼裡收集的5類車共計6000張,此代碼簡單呼叫模型,並未使用官方模板。

import tensorflow as tf
from tensorflow.python.platform import gfile
import numpy as np
from sklearn.model_selection import train_test_split
import os

# Pin this process to GPU 3. The scraped original lowercased the variable
# name; CUDA only recognizes the upper-case spelling.
os.environ["CUDA_VISIBLE_DEVICES"] = "3"

# One sub-directory per class under the dataset root (10 classes:
# 5 flower classes + 5 car classes, per the article text).
data = os.listdir(r'c:\users\administrator\desktop\all_datas')

# Filled in by load_data(): full image paths and their integer class labels.
# NOTE(review): the `= []` initializers were lost in extraction; the
# append calls in load_data() require lists here.
path_list = []
label_data = []

# Collect the full path and class label of every image.
def load_data(data):
    """Walk each class sub-directory, filling the module-level
    ``path_list`` / ``label_data`` lists as a side effect.

    Args:
        data: list of class sub-directory names under the dataset root
            (as returned by ``os.listdir``).

    Returns:
        (data_y, num_class): labels as a numpy int array aligned with
        ``path_list``, and the number of class directories seen.
    """
    num_class = 0
    # enumerate() gives each class directory an integer index that
    # doubles as its label.
    for num, img_files in enumerate(data):
        imgs = os.listdir(
            r'c:\users\administrator\desktop\all_datas/' + img_files)
        for img in imgs:
            path = os.path.join(
                r'c:\users\administrator\desktop\all_datas', img_files, img)
            # NOTE(review): the method names were stripped by the scraper
            # ("path_list.(path)"); append is the only call consistent
            # with building parallel path/label lists.
            path_list.append(path)
            label_data.append(num)
        num_class += 1
    data_y = np.array(label_data)
    return data_y, num_class

# Build the label array; the article's logged output shows
# num_class == 10 and 6014 images in total.
data_y, num_class = load_data(data)
print(num_class)
print(data_y)
print(len(data_y))

# Read raw file contents for each path (JPEG bytes fed to the graph later).
def feature(filenames):
    """Return a list of raw image byte strings, one per input path.

    Args:
        filenames: iterable of image file paths.

    Returns:
        list of ``bytes`` objects (undecoded JPEG data).
    """
    img_list = []
    for i in filenames:
        # NOTE(review): original scraped text reads "gfile.gfile"; the
        # TF1.x API for binary reads is gfile.FastGFile.
        img_data = gfile.FastGFile(i, 'rb').read()
        img_list.append(img_data)
    return img_list

img_list = feature(path_list)
# img_list = np.array(img_list)
# print(img_list.shape)  # (903,)

# Tensor names inside the frozen Inception-v3 graph. These are
# case-sensitive; the scraper lowercased 'DecodeJpeg'.
bottleneck_tensor_name = 'pool_3/_reshape:0'      # bottleneck (2048-d) output
jpeg_data_tensor_name = 'DecodeJpeg/contents:0'   # raw-JPEG input tensor

# Extract bottleneck features with the frozen Inception-v3 model.
def model_feature_extraction(img_list):
    """Run every raw JPEG through frozen Inception-v3 and collect the
    2048-d bottleneck activation for each image.

    Args:
        img_list: list of raw JPEG byte strings (from ``feature``).

    Returns:
        numpy array of shape (len(img_list), 2048).
    """
    data_x = []
    with tf.Graph().as_default() as graph:
        # Load the serialized GraphDef of the pretrained network.
        with gfile.FastGFile(
                r'./inception_dec_2015/tensorflow_inception_graph.pb',
                'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
        # import_graph_def returns the tensors matching return_elements,
        # in order: bottleneck output first, JPEG input second.
        bottleneck_tensor, jpeg_data_tensor = tf.import_graph_def(
            graph_def,
            return_elements=[bottleneck_tensor_name, jpeg_data_tensor_name])
    with tf.Session(graph=graph) as sess:
        sess.run(tf.global_variables_initializer())
        for i in img_list:
            # NOTE(review): the feed_dict was lost in extraction;
            # feeding the raw JPEG bytes to the DecodeJpeg input is the
            # standard pattern for this frozen graph.
            bottleneck_values = sess.run(
                bottleneck_tensor, feed_dict={jpeg_data_tensor: i})
            # Squeeze the singleton batch dimension -> (2048,).
            bottleneck_values = np.squeeze(bottleneck_values)
            data_x.append(bottleneck_values)
    data_x = np.array(data_x)
    return data_x

data_x = model_feature_extraction(img_list)

# 80/20 train/test split with a fixed seed for reproducibility.
train_x, test_x, train_y, test_y = train_test_split(
    data_x, data_y, test_size=0.2, random_state=7)

# Inputs: 2048-d bottleneck vectors, integer labels, dropout keep prob.
x = tf.placeholder(tf.float32, [None, 2048])
y = tf.placeholder(tf.int64, [None])
keep_prob = tf.placeholder(tf.float32)

# Small classifier head on top of the frozen features:
# 2048 -> 1024 (relu + dropout) -> num_class logits.
fc1 = tf.layers.dense(x, 1024, activation=tf.nn.relu)
fc1 = tf.nn.dropout(fc1, keep_prob=keep_prob)
a5 = tf.layers.dense(fc1, num_class)

# Per-example cross-entropy; np.mean(c) below averages it for logging.
cost = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=a5)
optimizer = tf.train.AdamOptimizer(0.00003).minimize(cost)
accuracy = tf.reduce_mean(
    tf.cast(tf.equal(tf.argmax(a5, 1), y), tf.float32))

sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Sequential mini-batches of 100, wrapping around at the end of the
# training set (`step` is the batch start offset).
step = 0
# NOTE(review): the scraped code reads range(1, 1501) but the logged
# output shows iterations up to 10000 — the loop bound may have been
# 10001 in the original; kept as written.
for i in range(1, 1501):
    # NOTE(review): both feed_dicts were lost in extraction; the
    # 100-sample slice is implied by `step += 100`, and the dropout
    # keep probability during training is a guess — confirm.
    c, a, o = sess.run(
        [cost, accuracy, optimizer],
        feed_dict={x: train_x[step:step + 100],
                   y: train_y[step:step + 100],
                   keep_prob: 0.5})
    step += 100
    if step >= train_x.shape[0]:
        step = 0
    if i % 100 == 0:
        print(i, np.mean(c), a)

# Final evaluation on the held-out split (dropout disabled).
print(sess.run(accuracy,
               feed_dict={x: test_x, y: test_y, keep_prob: 1.0}))

10

[0 0 0 ... 9 9 9]

6014

1000 0.03149219 1.0

2000 0.010143225 1.0

3000 0.0062059807 1.0

4000 0.0020856506 1.0

5000 0.0027446924 1.0

6000 0.00093447714 1.0

7000 0.0025312935 1.0

8000 0.00027133815 1.0

9000 0.0015174439 1.0

10000 0.00019120889 1.0

0.9459684

used210.72s

遷移學習 自我學習

最近在看ng的深度學習教程,看到self taught learning的時候,對一些概念感到很陌生。作為還清技術債的一個環節,用半個下午的時間簡單搜了下幾個名詞,以後如果會用到的話再深入去看。監督學習在前一篇部落格中討論過了,這裡主要介紹下遷移學習 自我學習。因為監督學習需要大量訓練樣本為前提,同...

深度學習 遷移學習

遷移學習 把公共的資料集的知識遷移到自己的問題上,即將已經訓練好的模型的一部分知識 網路結構 直接應用到另一個類似模型中去 小資料集 利用遷移學習,應用其他研究者建立的模型和引數,用少量的資料僅訓練最後自定義的softmax網路。從而能夠在小資料集上達到很好的效果。例如 imagenet 資料集,它...

遷移學習 domain adaption

一 概念 1 learning from scratch 即學一個cnn網路with random initialization 2 在新的學習任務中,可以利用現有訓練好的imagenet網路提取特徵,去掉最後一個分類的全連線層,在classifier層之前提取4096維的特徵,這些特徵稱為cnn ...