commit 1c34eb21ca

22 changed files with 452 additions and 0 deletions
@@ -0,0 +1,9 @@
# macOS desktop manager config
.DS_Store
# Jupyter dev
.ipynb_checkpoints
*/.ipynb_checkpoints/*
# VS Code
*/.vscode/
# Python
__pycache__
@@ -0,0 +1,54 @@
> This project reproduces the machine-learning network part of "Problem-independent machine learning (PIML)-based topology optimization—A universal approach".

## Dependencies

> PyTorch 2.1.0, CUDA 12.1, Ubuntu 20.04 (no new features are used, so older versions should be compatible)
```
# matplotlib==3.8.0
# numpy==1.26.2
# scikit_learn==1.3.0
# torch==2.1.0
pip install -r requirements.txt
```
## Usage

TODO: manage the network parameters with the argparse module (see the sketch after the Test subsection)

### Train

`python train.py`

> Edit the `data_mod` variable in `train.py` to select the dataset

### Test

`python test.py`

> Edit the `dataload_mod` and `pretrained_mod` variables in `test.py` to select the data to load and the pretrained model
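
A minimal sketch of what the planned argparse interface could look like (the `--mod` and `--epochs` flag names are assumptions for illustration, not part of the current code):

```
# hypothetical CLI for train.py / test.py; flag names are illustrative
import argparse

parser = argparse.ArgumentParser(description='PIML topology optimization network')
parser.add_argument('--mod', default='mod1', choices=['mod1', 'mod2', 'mod3'],
                    help='dataset / pretrained model to use')
parser.add_argument('--epochs', type=int, default=10000, help='training epochs')
args = parser.parse_args()
```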
## Datasets

> Three sets of deformation and density data generated with a classic 2D topology-optimization code
mod1: (image)

mod2: (image)

mod3: (image)
## Project structure

```
.
├── checkpoints
│   ├── ...
├── datasets
│   ├── ...
├── models
│   ├── ANN.py
│   ├── AutoEncoder.py
│   └── CNN.py
├── README.md
├── requirements.txt
├── test.py
├── train.py
└── utils
    ├── data_loader.py
    └── data_standardizer.py
```
(three binary image files added, ~2 KiB each, not shown)
@@ -0,0 +1,45 @@
import torch
import torch.nn as nn
import torch.nn.functional as F


class ANN_Model(nn.Module):
    def __init__(self, input_features=8, out_features=72):
        super().__init__()
        # Displacement branch: lift the 8 coarse corner-node values to 25 features
        self.fc1 = nn.Linear(input_features, 12)
        self.fc2 = nn.Linear(12, 16)
        self.fc3 = nn.Linear(16, 20)
        self.fc4 = nn.Linear(20, 25)

        # Trunk: applied after the 25 density values are concatenated (25 + 25 = 50)
        self.fc5 = nn.Linear(50, 60)
        self.fc6 = nn.Linear(60, 70)
        self.fc7 = nn.Linear(70, 80)
        self.fc8 = nn.Linear(80, 90)
        self.fc9 = nn.Linear(90, 100)
        self.fc10 = nn.Linear(100, 90)
        self.fc11 = nn.Linear(90, 80)

        self.out = nn.Linear(80, out_features)

    def forward(self, x):
        # First 25 columns are the 5x5 element densities, the rest the displacements
        density = x[:, :25].reshape(x.shape[0], 25)
        displace = x[:, 25:]
        x = F.relu(self.fc1(displace))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = F.relu(self.fc4(x))
        x = torch.hstack((density, x))

        x = F.relu(self.fc5(x))
        x = F.relu(self.fc6(x))
        x = F.relu(self.fc7(x))
        x = F.relu(self.fc8(x))
        x = F.relu(self.fc9(x))
        x = F.relu(self.fc10(x))
        x = F.relu(self.fc11(x))

        x = self.out(x)

        return x
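
A quick shape check of the model above (illustrative only, not part of the repository; each input row is 25 coarse-cell densities followed by 8 corner-node displacement values):

```
import torch
from models.ANN import ANN_Model

model = ANN_Model()
x = torch.randn(4, 33)   # batch of 4 samples: 25 densities + 8 displacements
y = model(x)
print(y.shape)           # expected: torch.Size([4, 72])
```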
@@ -0,0 +1,76 @@
#### ENCODER_Model
import torch
import torch.nn as nn
import torch.nn.functional as F


class AutoEncoder_Model(nn.Module):
    def __init__(self, input_features=33, out_features=72):
        super().__init__()
        self.upsample25x = nn.Upsample(scale_factor=2.5, mode='bilinear')

        self.fc1 = nn.Linear(75, 100)
        self.fc2 = nn.Linear(100, 150)
        self.fc3 = nn.Linear(150, 200)
        self.fc4 = nn.Linear(200, 300)
        self.fc5 = nn.Linear(300, 400)
        self.fc6 = nn.Linear(400, 500)
        self.fc7 = nn.Linear(500, 576)

        self.fc8 = nn.Linear(576, 500)
        self.fc9 = nn.Linear(500, 400)
        self.fc10 = nn.Linear(400, 300)
        self.fc11 = nn.Linear(300, 200)
        self.fc12 = nn.Linear(200, 150)
        self.fc13 = nn.Linear(150, 100)
        self.fc14 = nn.Linear(100, 72)

    def forward(self, x):
        B = x.shape[0]
        density = x[:, :25]
        density = density.reshape(B, 1, 5, 5)  # B 1(C) 5 5

        u = x[:, 25:29].reshape(B, 1, 2, 2)  # B 1(C) 2 2
        v = x[:, 29:].reshape(B, 1, 2, 2)    # B 1(C) 2 2

        displace = torch.cat((u, v), 1)  # B 2(C) 2 2

        displace = self.upsample25x(displace)  # upsample -> B 2 5 5

        # Option 1: couple via element-wise multiplication
        # u = torch.mul(displace[:, 0, :, :], density[:, 0, :, :])
        # v = torch.mul(displace[:, 1, :, :], density[:, 0, :, :])
        # x = torch.stack((u, v), 1)  # B 2 5 5
        # x = x.reshape(B, 50)

        # Option 2: couple via convolution
        # self.conv55 = nn.Conv2d(3, 2, 3, padding=1)  # B 3 5 5 -> B 2 5 5

        # Option 3: concatenate directly
        x = torch.cat((displace, density), 1)
        x = x.reshape(B, 75)

        # Make x a gradient-tracking leaf tensor
        # (modern replacement for the deprecated torch.autograd.Variable)
        x = x.detach().requires_grad_(True)

        # Encode
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = F.relu(self.fc4(x))
        x = F.relu(self.fc5(x))
        x = F.relu(self.fc6(x))
        x = F.relu(self.fc7(x))
        shape_func = x.clone()

        # Decode
        x = F.relu(self.fc8(x))
        x = F.relu(self.fc9(x))
        x = F.relu(self.fc10(x))
        x = F.relu(self.fc11(x))
        x = F.relu(self.fc12(x))
        x = F.relu(self.fc13(x))
        x = F.relu(self.fc14(x))

        return x, shape_func
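
The model upsamples the 2x2 corner displacements by a factor of 2.5 to match the 5x5 density grid before encoding. A smoke test (illustrative only, not part of the repository):

```
import torch
from models.AutoEncoder import AutoEncoder_Model

model = AutoEncoder_Model()
x = torch.randn(4, 33)             # 25 densities + 4 u-values + 4 v-values
y, shape_func = model(x)
print(y.shape, shape_func.shape)   # expected: torch.Size([4, 72]) torch.Size([4, 576])
```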
@@ -0,0 +1,63 @@
import torch
import torch.nn as nn
import torch.nn.functional as F


class CNN_Model(nn.Module):
    def __init__(self, input_features=8, out_features=72):
        super().__init__()

        self.upsample25x = nn.Upsample(scale_factor=2.5, mode='bilinear')
        # self.conv55 = nn.Conv2d(3, 3, 3, padding=1)  # keep B 3 5 5
        # self.conv56 = nn.Conv2d(3, 3, 4, padding=2)  # B 3 5 5 -> B 3 6 6

        # self.conv66_1 = nn.Conv2d(3, 5, 3, padding=1)  # B 3 6 6 -> B 5 6 6
        # self.conv66_2 = nn.Conv2d(5, 8, 3, padding=1)  # B 5 6 6 -> B 8 6 6
        # self.conv66_3 = nn.Conv2d(8, 5, 3, padding=1)  # B 8 6 6 -> B 5 6 6
        # self.conv66_4 = nn.Conv2d(5, 2, 3, padding=1)  # B 5 6 6 -> B 2 6 6

        self.conv55 = nn.Conv2d(3, 2, 3, padding=1)  # B 3 5 5 -> B 2 5 5 (unused in the active path below)
        self.fc1 = nn.Linear(50, 70)
        self.fc2 = nn.Linear(70, 90)
        self.fc3 = nn.Linear(90, 110)
        self.fc4 = nn.Linear(110, 130)
        self.fc5 = nn.Linear(130, 100)
        self.fc6 = nn.Linear(100, 80)
        self.fc7 = nn.Linear(80, 72)

    def forward(self, x):
        B = x.shape[0]
        density = x[:, :25]
        density = density.reshape(B, 1, 5, 5)  # B 1 5 5

        displace = x[:, 25:]
        displace = displace.reshape(B, 2, 2, 2)  # B 2 2 2(C)
        displace = displace.permute(0, 3, 1, 2)  # reorder tensor dims -> B C W H
        displace = self.upsample25x(displace)    # upsample -> B 2 5 5

        # x = torch.cat((displace, density), 1)

        # x = F.relu(self.conv56(x))
        # x = F.relu(self.conv66_1(x))
        # x = F.relu(self.conv66_2(x))
        # x = F.relu(self.conv66_3(x))
        # x = F.relu(self.conv66_4(x))

        # x = x.permute(0, 2, 3, 1)
        # x = x.reshape(B, 72)

        # Couple displacement and density by element-wise multiplication
        u = torch.mul(displace[:, 0, :, :], density[:, 0, :, :]).reshape(B, 25)
        v = torch.mul(displace[:, 1, :, :], density[:, 0, :, :]).reshape(B, 25)
        x = torch.cat((u, v), 1)

        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = F.relu(self.fc4(x))
        x = F.relu(self.fc5(x))
        x = F.relu(self.fc6(x))
        x = F.relu(self.fc7(x))

        return x
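
Here the coupling between displacement and density is done by element-wise multiplication on the upsampled 5x5 grid. A smoke test (illustrative only, not part of the repository):

```
import torch
from models.CNN import CNN_Model

model = CNN_Model()
x = torch.randn(4, 33)   # 25 densities + 8 displacement values
print(model(x).shape)    # expected: torch.Size([4, 72])
```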
@@ -0,0 +1,4 @@
matplotlib==3.8.0
numpy==1.26.2
scikit_learn==1.3.0
torch==2.1.0
@@ -0,0 +1,63 @@
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F

from utils.data_standardizer import standardization
from utils.data_loader import data_loader

import matplotlib.pyplot as plt


def test(model_load_path, X, standard=False, device=0):
    model = torch.load(model_load_path)

    if standard:
        X = standardization(X)
    device = f'cuda:{device}' if torch.cuda.is_available() else 'cpu'
    X = torch.from_numpy(X).type(torch.float32).to(device)

    with torch.no_grad():
        return model(X)


if __name__ == '__main__':
    # Load datasets
    # Test data selection:
    dataload_mod = 'mod1'    # options: mod1 mod2 mod3
    # Pretrained model selection:
    pretrained_mod = 'mod1'  # options: mod1 mod2 mod3

    dst_path = 'datasets/top88_' + dataload_mod + '_xPhys_180_60.npy'
    U_path = 'datasets/top88_' + dataload_mod + '_u_180_60.npy'
    global_density, global_displace, coarse_density, coarse_displace, fine_displace = data_loader(dst_path, U_path)
    X = np.hstack((coarse_density[:, :], coarse_displace[:, :, 0], coarse_displace[:, :, 1]))
    Y = fine_displace[:, :]

    # Set loss function
    loss_function = nn.MSELoss()

    # Predict
    pred = test('checkpoints/ANN_' + pretrained_mod + '_opt.pt', X)

    # Calculate per-sample loss
    pred_loss = []
    device = f'cuda:{0}' if torch.cuda.is_available() else 'cpu'
    Y = torch.from_numpy(Y).type(torch.float32).to(device)
    for i in range(pred.shape[0]):
        pred_loss.append(loss_function(pred[i, :], Y[i, :]).item())

    print('Total loss: ' + str(loss_function(pred, Y).item()))

    # Plot the loss of each coarse element
    plt.plot(range(pred.shape[0]), pred_loss)
    plt.ylabel('Loss')
    plt.xlabel('Coarse mesh id')
    plt.title('Per-element test loss')
    plt.show()

    # Show the loss values laid out on the 12 x 36 coarse grid
    loss_matrix = np.asarray(pred_loss)
    loss_matrix = loss_matrix.reshape(int(60 / 5), int(180 / 5))
    plt.matshow(loss_matrix)
    plt.title('Loss value on the coarse grid')
    plt.show()
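
Because train.py saves the whole pickled model, loading a GPU-trained checkpoint on a CPU-only machine can fail. A common workaround is passing map_location (a sketch, assuming the model classes are importable from the same paths):

```
import torch

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = torch.load('checkpoints/ANN_mod1_opt.pt', map_location=device)
model.eval()
```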
@@ -0,0 +1,85 @@
import numpy as np
import time
import matplotlib.pyplot as plt

from sklearn.model_selection import train_test_split
from utils.data_standardizer import standardization
from utils.data_loader import data_loader

import torch
import torch.nn as nn
import torch.nn.functional as F

from models.ANN import ANN_Model


def train(X, Y, epochs=10000, mod='mod1', standard=False, device=0):
    if standard:
        X = standardization(X)
        Y = standardization(Y)

    # Hold out 20% of the samples (not evaluated here; see the sketch below)
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)

    device = f'cuda:{device}' if torch.cuda.is_available() else 'cpu'

    X_train = torch.from_numpy(X_train).type(torch.float32).to(device)
    X_test = torch.from_numpy(X_test).type(torch.float32).to(device)
    Y_train = torch.from_numpy(Y_train).type(torch.float32).to(device)
    Y_test = torch.from_numpy(Y_test).type(torch.float32).to(device)

    # Load net model
    torch.manual_seed(20)
    model = ANN_Model()
    # model = CNN_Model()
    # model = ENCODER_Model()
    model = model.to(device)
    print(model)

    # Set loss function
    loss_function = nn.MSELoss()
    # MSE_loss = nn.MSELoss()
    # BCE_loss = nn.BCELoss()

    # Set Adam optimizer
    # For the ANN a learning rate of about 0.001 works best (without normalization)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    # Train
    start_time = time.time()
    losses = []
    for i in range(epochs):
        pred = model.forward(X_train)
        loss = loss_function(pred, Y_train)
        losses.append(loss.cpu().detach().numpy())
        if i % (epochs // 10) == 1:
            print("Epoch number: {} and the loss : {}".format(i, loss.item()))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print(time.time() - start_time)

    torch.save(model, 'checkpoints/' + str(model).split('_')[0] + '_' + mod + '_' + 'opt.pt')

    return losses


if __name__ == '__main__':
    # Load datasets
    # Train data selection:
    data_mod = 'mod1'  # options: mod1 mod2 mod3

    dst_path = 'datasets/top88_' + data_mod + '_xPhys_180_60.npy'
    U_path = 'datasets/top88_' + data_mod + '_u_180_60.npy'
    global_density, global_displace, coarse_density, coarse_displace, fine_displace = data_loader(dst_path, U_path)
    X = np.hstack((coarse_density[:, :], coarse_displace[:, :, 0], coarse_displace[:, :, 1]))
    Y = fine_displace[:, :]

    # Train
    losses = train(X, Y, epochs=10000, mod=data_mod)

    # Plot training loss
    plt.plot(range(len(losses)), losses)
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.show()
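
train() holds out 20% of the samples but never evaluates on them. A minimal sketch of such an evaluation, reusing X, Y and data_mod from the block above (assumptions: the checkpoint exists and the split uses the same random_state):

```
_, X_test, _, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = torch.load('checkpoints/ANN_' + data_mod + '_opt.pt', map_location=device)
with torch.no_grad():
    pred = model(torch.from_numpy(X_test).type(torch.float32).to(device))
    Y_t = torch.from_numpy(Y_test).type(torch.float32).to(device)
    print('hold-out MSE:', nn.MSELoss()(pred, Y_t).item())
```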
@@ -0,0 +1,46 @@
import numpy as np


def data_loader(density_load_path, displace_load_path):
    # Load datasets
    global_density = np.load(density_load_path)
    global_displace = np.load(displace_load_path)
    global_displace = global_displace.reshape(181, 61, 2)
    global_displace = np.dstack((global_displace[:, :, 0].T, global_displace[:, :, 1].T))
    print(global_displace.shape)
    print(global_density.shape)

    m = 5             # each coarse element spans m x m fine elements
    N = (m + 1) ** 2  # fine-grid nodes per coarse element
    global_nely = global_density.shape[0]
    global_nelx = global_density.shape[1]
    coarse_nely = int(global_nely / m)
    coarse_nelx = int(global_nelx / m)

    # Generate coarse mesh density
    # Rows are indexed row-major over the coarse grid (elx + ely * coarse_nelx);
    # the original index `elx + ely * m` collided for different (elx, ely) pairs.
    coarse_density = np.zeros(shape=(coarse_nely * coarse_nelx, m * m))
    for ely in range(coarse_nely):
        for elx in range(coarse_nelx):
            coarse_density[elx + ely * coarse_nelx] = global_density[ely * m : (ely + 1) * m, elx * m : (elx + 1) * m].flatten()
    print(coarse_density.shape)

    # Generate coarse mesh displacement (the four corner nodes of each coarse element)
    coarse_displace = np.zeros(shape=(coarse_nely * coarse_nelx, 4, 2))
    for ely in range(coarse_nely):
        for elx in range(coarse_nelx):
            coarse_displace[elx + ely * coarse_nelx][0] = global_displace[ely * m, elx * m, :]
            coarse_displace[elx + ely * coarse_nelx][1] = global_displace[ely * m, (elx + 1) * m, :]
            coarse_displace[elx + ely * coarse_nelx][2] = global_displace[(ely + 1) * m, elx * m, :]
            coarse_displace[elx + ely * coarse_nelx][3] = global_displace[(ely + 1) * m, (elx + 1) * m, :]
    print(coarse_displace.shape)

    # Generate fine mesh displacement (all (m+1)^2 nodes of each coarse element)
    fine_displace = np.zeros(shape=(coarse_nely * coarse_nelx, ((m + 1) ** 2) * 2))
    for ely in range(coarse_nely):
        for elx in range(coarse_nelx):
            fine_displace[elx + ely * coarse_nelx] = global_displace[ely * m : (ely + 1) * m + 1, elx * m : (elx + 1) * m + 1, :].flatten()
    print(fine_displace.shape)

    return global_density, global_displace, coarse_density, coarse_displace, fine_displace


if __name__ == '__main__':
    # Smoke test with the mod1 dataset (paths relative to the repo root)
    data_loader('datasets/top88_mod1_xPhys_180_60.npy', 'datasets/top88_mod1_u_180_60.npy')
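
For the 180x60 datasets this yields 36 x 12 = 432 coarse elements; a sanity check of the returned shapes (a sketch, assuming the mod1 files are present):

```
gd, gu, cd, cu, fu = data_loader('datasets/top88_mod1_xPhys_180_60.npy',
                                 'datasets/top88_mod1_u_180_60.npy')
assert gd.shape == (60, 180)     # fine-grid densities
assert gu.shape == (61, 181, 2)  # fine-grid nodal u/v displacements
assert cd.shape == (432, 25)     # 5x5 densities per coarse element
assert cu.shape == (432, 4, 2)   # corner displacements per coarse element
assert fu.shape == (432, 72)     # 36 nodes x 2 components per coarse element
```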
@@ -0,0 +1,7 @@
import numpy as np


def standardization(data):
    mu = np.mean(data, axis=0)
    sigma = np.std(data, axis=0)
    return (data - mu) / sigma
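
standardization() is a column-wise z-score; note that a column with zero variance would divide by zero. Example usage (illustrative):

```
import numpy as np

data = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
print(standardization(data))  # each column now has zero mean and unit variance
```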