PyTorch basic data initialization

import torch

# Tensor initialization

# print ('basic initialization:', torch.tensor([1,2,3]))

# print('sequence initialization:', torch.arange(0,4,step = 1))

# print('sequence initialization:',torch.range(0,3,step = 1))

# step is the spacing between consecutive values; torch.range includes the endpoint and is deprecated in favor of arange

# print('evenly spaced initialization:',torch.linspace(0,3,steps = 4))

# print('log-spaced initialization:',torch.logspace(0,3,steps = 4))

# steps is the number of points to generate
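
# For reference, expected outputs of the calls above (a sketch, assuming default dtypes):

# arange(0,4,step = 1) -> tensor([0, 1, 2, 3]); range(0,3,step = 1) -> tensor([0., 1., 2., 3.])

# linspace(0,3,steps = 4) -> tensor([0., 1., 2., 3.]); logspace(0,3,steps = 4) -> tensor([1., 10., 100., 1000.])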

# weight = torch.full((3,4), 0.6)

# # the fill value is the probability of drawing a 1

# print(torch.bernoulli(weight))

# # each element is independently sampled as 0 or 1

# # weights = torch.tensor([[1,100],[100,1],[1,1]],dtype = torch.float32)

# # print (torch.multinomial(weights,1))

# # args are the row weights and the number of indices to draw per row
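
# # with these weights, multinomial would almost always return index 1 for row 0 and index 0 for row 1,

# # while row 2 picks either index with equal probability (a sketch of the expected behaviour)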

# print('random permutation:',torch.randperm(10))

# # a random permutation of the integers 0-9

# print('uniform random integers:',torch.randint(low = 0, high = 4, size = (3, 4)))

# print('uniform random integers:',torch.randint_like(torch.ones(3, 4), low = 0, high = 4))

# # low is inclusive, high is exclusive
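
# # note: randint_like takes its shape and dtype from the input tensor, so the second call

# # returns float32 values because torch.ones(3, 4) is float32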

# print('standard uniform distribution:', torch.rand(3, 4))

# print('standard uniform distribution:', torch.rand_like(torch.ones(3, 4)))

# # samples are drawn from the interval [0, 1)

# print('standard normal distribution:',torch.randn(3, 4))

# print('standard normal distribution:',torch.randn_like(torch.ones(3, 4)))

# mean = torch.tensor([1., 2.])

# std = torch.tensor([4., 2.])

# print('general normal distribution:',torch.normal(mean, std))

# # note that the mean and std tensors must be floating point
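
# # torch.normal(mean, std) samples element-wise: the first entry is drawn from N(1, 4**2)

# # and the second from N(2, 2**2), giving a 2-element result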

# test_tensor = torch.tensor([1,2,3,4]).reshape(1,2,2,1)

# print(test_tensor.dim())

# print(test_tensor.size())

# print(test_tensor.numel())
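
# # expected outputs: dim() -> 4, size() -> torch.Size([1, 2, 2, 1]), numel() -> 4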

# Tensor selection

# tensor_to_be_selected = torch.arange(12).reshape(3, 4)

# index_selected_by = torch.tensor([0, 2])

# print ('index select:', tensor_to_be_selected.index_select(1, index_selected_by))

# # index_select keeps the original number of dimensions

# mask_selected_by = torch.tensor([[0, 0, 0, 1],[1, 0, 1, 0],[0, 1, 0, 0]], dtype = torch.bool)  # bool dtype; uint8 masks are deprecated

# print ('mask select:', tensor_to_be_selected.masked_select(mask_selected_by))

# # masked_select flattens the result to 1-D

# take_selected_by = torch.tensor([2,3,4])

# print ('take select:', tensor_to_be_selected.take(take_selected_by))

# # take indexes into the flattened tensor and returns a 1-D result
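
# # expected results (the source tensor is arange(12).reshape(3, 4)):

# # index_select -> tensor([[0, 2], [4, 6], [8, 10]]) (columns 0 and 2)

# # masked_select -> tensor([3, 4, 6, 9]); take -> tensor([2, 3, 4]) (flat indices)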

# print(torch.arange(4).reshape(2, 2).repeat(4, 2))

# # the result shape is (2*4, 2*2) = (8, 4)

# tensor_one = torch.tensor([[1, 2]])

# tensor_two = torch.tensor([[3, 4]])

# print(torch.cat((tensor_one, tensor_two), 0))

# # 1x2 cat 1x2 ——>2x2

# print(torch.cat((tensor_one, tensor_two), 1))

# # 1x2 cat 1x2 ——>1x4

# tensor_to_be_selected = torch.arange(12, dtype = torch.float32).reshape(3, 4)

# print(tensor_to_be_selected.reciprocal())

# print(tensor_to_be_selected.sqrt())

# print(tensor_to_be_selected.rsqrt())
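
# # note: the first element of tensor_to_be_selected is 0, so reciprocal() and rsqrt() return inf for that entry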

# positive_tensor = torch.tensor([0.5],dtype = torch.float32)

# negative_tensor = torch.tensor([-0.5],dtype = torch.float32)

# print('sign:',torch.sign(positive_tensor), torch.sign(negative_tensor))

# print('absolute value:', torch.abs(positive_tensor), torch.abs(negative_tensor))

# print('floor:',torch.floor(positive_tensor), torch.floor(negative_tensor))

# print('ceil:',torch.ceil(positive_tensor), torch.ceil(negative_tensor))

# print('round:',torch.round(positive_tensor), torch.round(negative_tensor))

# print('truncate:',torch.trunc(positive_tensor),torch.trunc(negative_tensor))

# print('fractional part:',torch.frac(positive_tensor), torch.frac(negative_tensor))
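
# # note: torch.round rounds halfway cases to the nearest even integer, so both 0.5 and -0.5 round to zero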

# tensor_one = torch.tensor([1, 2, 3])

# tensor_two = torch.tensor([2, 1, 3])

# print('1-D dot product:', torch.dot(tensor_one, tensor_two))

# tensor_one = torch.arange(4).reshape(2, 2)

# tensor_two = torch.tensor([1, 2])

# print('matrix-vector product:', torch.mv(tensor_one, tensor_two))

# tensor_one = torch.arange(4).reshape(2, 2)

# tensor_two = torch.tensor([[1, 2], [3, 4]])
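
# # the two 2x2 tensors above are otherwise unused; presumably a matrix-matrix product was intended, e.g.:

# print('matrix-matrix product:', torch.mm(tensor_one, tensor_two))

# # expected result: tensor([[ 3,  4], [11, 16]])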

# tensor_one = torch.arange(1, 5, dtype = torch.float32)

# print('variance:',tensor_one.var())

# print('max:',tensor_one.max())

# print('median:',tensor_one.median())

# print('product:',tensor_one.prod())

# print('the 3rd smallest value is:',tensor_one.kthvalue(3))
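
# # expected values for tensor([1., 2., 3., 4.]): var() -> 1.6667 (unbiased), max() -> 4., median() -> 2.,

# # prod() -> 24., kthvalue(3) -> (values=3., indices=2), i.e. the k-th smallest element and its index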
