# 1. Collect the data
import torch

t_c = [0.5, 14.0, 15.0, 28.0, 11.0, 8.0, 3.0, -4.0, 6.0, 13.0, 21.0]
t_u = [35.7, 55.9, 58.2, 81.9, 56.3, 48.9, 33.9, 21.8, 48.4, 60.4, 68.4]

t_c = torch.tensor(t_c).unsqueeze(1)  # add a dimension: shape (11,) -> (11, 1), one sample per row
t_u = torch.tensor(t_u).unsqueeze(1)

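# Optional (not in the original): seed the RNG here so the random split below
# is reproducible across runs.
# torch.manual_seed(42)  # hypothetical seed value
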
n_samples = t_u.shape[0]  # number of samples
n_test = int(n_samples * 0.2)  # size of the test set

shuffled_indices = torch.randperm(n_samples)  # shuffle the sample indices

train_indices = shuffled_indices[:-n_test]  # training-set indices
test_indices = shuffled_indices[-n_test:]   # test-set indices

||
# 訓練集
|
||
t_u_train = t_u[train_indics]
|
||
t_c_train = t_c[train_indics]
|
||
# 測試集
|
||
t_u_test = t_u[test_indics]
|
||
t_c_test = t_c[test_indics]
|
||
|
||
print(t_u_train)
|
||
print(t_c_train)
|
||
print(t_u_test)
|
||
print(t_c_test)
|
||
|
||
# Standardize with training-set statistics only, so that no information
# about the test set leaks into training
t_u_mean = t_u_train.mean()
t_u_std = t_u_train.std()

t_u_train_norm = (t_u_train - t_u_mean) / t_u_std
t_u_test_norm = (t_u_test - t_u_mean) / t_u_std

t_c_mean = t_c_train.mean()
t_c_std = t_c_train.std()

t_c_train_norm = (t_c_train - t_c_mean) / t_c_std
t_c_test_norm = (t_c_test - t_c_mean) / t_c_std

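# Optional sanity check (not in the original): the standardized training data
# should have mean ~0 and std ~1; the test set only approximately, since it is
# scaled with the training statistics.
print(t_u_train_norm.mean().item(), t_u_train_norm.std().item())
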
# 2. Build the model
# A single linear layer would also do the job:
# import torch.nn as nn
#
# linear_model = nn.Linear(in_features=1, out_features=1)  # in_features is the number of input neurons, out_features the number of output neurons

import torch.nn as nn
from collections import OrderedDict

# Build a multi-layer network: a hidden layer with 13 neurons and an output layer with 1 neuron
neural_network = nn.Sequential(OrderedDict([
    ('hidden', nn.Linear(1, 13)),       # hidden layer
    ('hidden_activation', nn.Tanh()),   # hidden-layer activation function
    ('output', nn.Linear(13, 1))        # output layer
]))

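# Optional inspection (not in the original): the OrderedDict keys become the
# submodule names, so the learnable parameters can be listed by name.
for name, param in neural_network.named_parameters():
    print(name, param.shape)  # e.g. hidden.weight torch.Size([13, 1])
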
# 3. Declare the optimizer and the loss function
optimizer = torch.optim.SGD(
    neural_network.parameters(),
    lr=1e-2
)

def loss_fn(t_p, t_c):
    return ((t_p - t_c) ** 2).mean()  # mean squared error

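# Note (not in the original): this hand-written loss is equivalent to the
# built-in criterion nn.MSELoss(), which could be used instead:
# loss_fn = nn.MSELoss()
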
# 4. Define the training loop
def train_loop(n_epochs, optimizer, model, loss_fun, t_u_train, t_u_test, t_c_train, t_c_test):
    for epoch in range(1, n_epochs + 1):
        t_p_train = model(t_u_train)
        loss_train = loss_fun(t_p_train, t_c_train)

        # Evaluate on the test set without building an autograd graph
        with torch.no_grad():
            t_p_test = model(t_u_test)
            loss_test = loss_fun(t_p_test, t_c_test)

        optimizer.zero_grad()
        loss_train.backward()
        optimizer.step()

        if epoch == 1 or epoch % 10 == 0:
            print(f'Epoch {epoch}: Training Loss: {loss_train:.4f}')
            print(f'Test Loss: {loss_test:.4f}')

# 5. Run the training
train_loop(
    n_epochs=500,
    optimizer=optimizer,
    model=neural_network,
    loss_fun=loss_fn,
    t_u_train=t_u_train_norm,
    t_u_test=t_u_test_norm,
    t_c_train=t_c_train_norm,
    t_c_test=t_c_test_norm
)

# De-standardize the predictions back to degrees Celsius before comparing
# with the ground truth
with torch.no_grad():
    print('output', neural_network(t_u_test_norm) * t_c_std + t_c_mean)
print('val', t_c_test)
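
# Optional summary (a sketch, not in the original): mean absolute test error
# in degrees Celsius.
with torch.no_grad():
    pred_c = neural_network(t_u_test_norm) * t_c_std + t_c_mean
    print('mean abs error (°C):', (pred_c - t_c_test).abs().mean().item())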