Loss function: mixing SSIM and L1


The SSIM term is weighted by alpha, and the L1 term by 1 - alpha.
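Concretely, with the L1 term computed as a Gaussian-weighted mean absolute error, the loss implemented below is

    loss = alpha * (1 - SSIM(img1, img2)) + (1 - alpha) * GaussL1(img1, img2)

The default alpha = 0.84 matches the mix weight proposed by Zhao et al. in "Loss Functions for Image Restoration with Neural Networks" (where the SSIM term is MS-SSIM).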

#%%
import numpy as np
import tensorflow as tf

# Mimic MATLAB's fspecial('gaussian') to build the filter kernel used for SSIM
def _tf_fspecial_gauss(size, sigma, channels=1):
    """Function to mimic the 'fspecial' gaussian MATLAB function."""
    x_data, y_data = np.mgrid[-size//2 + 1:size//2 + 1, -size//2 + 1:size//2 + 1]
    x_data = np.expand_dims(x_data, axis=-1)
    x_data = np.expand_dims(x_data, axis=-1)
    y_data = np.expand_dims(y_data, axis=-1)
    y_data = np.expand_dims(y_data, axis=-1)
    x = tf.constant(x_data, dtype=tf.float32)
    y = tf.constant(y_data, dtype=tf.float32)
    g = tf.exp(-((x**2 + y**2) / (2.0 * sigma**2)))
    window = g / tf.reduce_sum(g)                       # normalize so the kernel sums to 1
    return tf.tile(window, (1, 1, channels, channels))  # shape [size, size, channels, channels]

# Gaussian convolution
def tf_gauss_conv(img, filter_size=11, filter_sigma=1.5):
    _, height, width, ch = img.get_shape().as_list()
    size = min(filter_size, height, width)
    sigma = size * filter_sigma / filter_size if filter_size else 0
    window = _tf_fspecial_gauss(size, sigma, ch)  # window shape [size, size, ch, ch]
    # Zero-pad by size//2 on each side, then convolve with 'VALID' padding,
    # so the output keeps the same spatial size as the input.
    padded_img = tf.pad(img, [[0, 0], [size//2, size//2], [size//2, size//2], [0, 0]], mode="CONSTANT")
    return tf.nn.conv2d(padded_img, window, strides=[1, 1, 1, 1], padding='VALID')

# Gaussian-weighted L1 loss
def tf_gauss_weighted_l1(img1, img2, mean_metric=True, filter_size=11, filter_sigma=1.5):
    diff = tf.abs(img1 - img2)
    l1 = tf_gauss_conv(diff, filter_size=filter_size, filter_sigma=filter_sigma)
    if mean_metric:
        return tf.reduce_mean(l1)
    else:
        return l1

# Compute SSIM
def tf_ssim(img1, img2, cs_map=False, mean_metric=True, filter_size=11, filter_sigma=1.5):
    _, height, width, ch = img1.get_shape().as_list()
    size = min(filter_size, height, width)
    sigma = size * filter_sigma / filter_size if filter_size else 0
    window = _tf_fspecial_gauss(size, sigma, ch)  # window shape [size, size, ch, ch]
    k1 = 0.01
    k2 = 0.03
    l = 1  # dynamic range of the image (255 if pixel values are in [0, 255] rather than [0, 1])
    c1 = (k1*l)**2
    c2 = (k2*l)**2
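    # Per sliding window, the code below evaluates the standard SSIM index
    #   SSIM(x, y) = (2*ux*uy + c1) * (2*sigma_xy + c2)
    #                / ((ux^2 + uy^2 + c1) * (sigma_x^2 + sigma_y^2 + c2))
    # where the means, variances and covariance are Gaussian-weighted window statistics.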

    # Window means ux, uy and their squares
    padded_img1 = tf.pad(img1, [[0, 0], [size//2, size//2], [size//2, size//2], [0, 0]], mode="CONSTANT")  # zero-pad img1 on all sides
    padded_img2 = tf.pad(img2, [[0, 0], [size//2, size//2], [size//2, size//2], [0, 0]], mode="CONSTANT")  # zero-pad img2 on all sides
    mu1 = tf.nn.conv2d(padded_img1, window, strides=[1, 1, 1, 1], padding='VALID')  # Gaussian-weighted mean of img1 within each window
    mu2 = tf.nn.conv2d(padded_img2, window, strides=[1, 1, 1, 1], padding='VALID')
    mu1_sq = mu1*mu1    # ux*ux
    mu2_sq = mu2*mu2    # uy*uy
    mu1_mu2 = mu1*mu2   # ux*uy
    # Variances: Var[X] = E[X^2] - (E[X])^2, i.e. mean of the square minus square of the mean
    paddedimg11 = padded_img1*padded_img1
    paddedimg22 = padded_img2*padded_img2
    paddedimg12 = padded_img1*padded_img2
    sigma1_sq = tf.nn.conv2d(paddedimg11, window, strides=[1, 1, 1, 1], padding='VALID') - mu1_sq  # variance of img1
    sigma2_sq = tf.nn.conv2d(paddedimg22, window, strides=[1, 1, 1, 1], padding='VALID') - mu2_sq  # variance of img2
    sigma12 = tf.nn.conv2d(paddedimg12, window, strides=[1, 1, 1, 1], padding='VALID') - mu1_mu2   # covariance: mean of the product minus product of the means
    ssim_value = tf.clip_by_value(((2*mu1_mu2 + c1)*(2*sigma12 + c2)) / ((mu1_sq + mu2_sq + c1)*(sigma1_sq + sigma2_sq + c2)), 0, 1)
    if cs_map:  # contrast and structure terms only, ignoring luminance
        cs_map_value = tf.clip_by_value((2*sigma12 + c2) / (sigma1_sq + sigma2_sq + c2), 0, 1)  # contrast-structure map
        value = (ssim_value, cs_map_value)
    else:
        value = ssim_value
    if mean_metric:  # return the mean over the map; otherwise return the full SSIM map
        value = tf.reduce_mean(value)
    return value

def tf_ssim_l1_loss(img1, img2, mean_metric=True, filter_size=11, filter_sigma=1.5, alpha=0.84):
    l1 = tf_gauss_weighted_l1(img1, img2, mean_metric=False, filter_size=filter_size, filter_sigma=filter_sigma)
    if mean_metric:
        loss_ssim = 1 - tf_ssim(img1, img2, cs_map=False, mean_metric=True, filter_size=filter_size, filter_sigma=filter_sigma)
        loss_l1 = tf.reduce_mean(l1)
        value = loss_ssim * alpha + loss_l1 * (1 - alpha)
    else:
        loss_ssim = 1 - tf_ssim(img1, img2, cs_map=False, mean_metric=False, filter_size=filter_size, filter_sigma=filter_sigma)
        value = loss_ssim * alpha + l1 * (1 - alpha)
    return value, loss_ssim

#%%
img1 = np.arange(10000, dtype=np.float32).reshape([1, 100, 100, 1])
img2 = np.arange(10000, dtype=np.float32).reshape([1, 100, 100, 1]) - 1
l1_loss = tf_ssim_l1_loss(tf.constant(img1), tf.constant(img2))
with tf.Session() as sess:
    print(sess.run(l1_loss))
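As a sanity check (an addition beyond the original code, assuming TensorFlow 1.x graph mode), the SSIM term can be compared against the built-in tf.image.ssim, whose defaults are the same 11x11 Gaussian window with sigma 1.5 and k1 = 0.01, k2 = 0.03. Small differences are expected because tf_ssim above zero-pads the borders, while tf.image.ssim only averages over fully valid windows:

#%%
# Cross-check against TensorFlow's built-in SSIM; max_val=1.0 mirrors l = 1 above.
ref_ssim = tf.image.ssim(tf.constant(img1), tf.constant(img2), max_val=1.0)
with tf.Session() as sess:
    print(sess.run(ref_ssim))  # per-image SSIM, shape [batch]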
