Browse Source

rewrite with argparse for hyperparameter control

resources
郑敬润 1 year ago
parent
commit
3d389565c3
  1. 6
      .gitignore
  2. 74
      README.md
  3. BIN
      checkpoints/ANN_mod1_opt.pt
  4. BIN
      checkpoints/ANN_mod2_opt.pt
  5. BIN
      checkpoints/ANN_mod3_opt.pt
  6. BIN
      datasets/top88_mod1_u_180_60.npy
  7. BIN
      datasets/top88_mod1_xPhys_180_60.npy
  8. BIN
      datasets/top88_mod2_u_180_60.npy
  9. BIN
      datasets/top88_mod2_xPhys_180_60.npy
  10. BIN
      datasets/top88_mod3_u_180_60.npy
  11. BIN
      datasets/top88_mod3_xPhys_180_60.npy
  12. 0
      doc/mod1.jpg
  13. 0
      doc/mod2.jpg
  14. 0
      doc/mod3.jpg
  15. 0
      models/__init__.py
  16. 1548
      my-FEA-net.ipynb
  17. 0
      options/__init__.py
  18. 110
      options/base_options.py
  19. 19
      options/test_options.py
  20. 23
      options/train_options.py
  21. 51
      test.py
  22. 2
      topopt_EMsFEA.py
  23. 59
      train.py
  24. 30
      utils/data_loader.py
  25. 4
      utils/topopt_88.py
  26. 23
      utils/utils.py
  27. 8
      visualization.ipynb

6
.gitignore

@ -8,4 +8,8 @@
# python # python
__pycache__ __pycache__
# results folder as cache for data check # results folder as cache for data check
results/ results/
# datasets
datasets/
# checkpoints
checkpoints/

74
README.md

@ -1,4 +1,4 @@
> 该项目是《Problem-independent machine learning (PIML)-based topology optimization—A universal approach》机器学习网络部分的复现 > 该项目是《Problem-independent machine learning (PIML)-based topology optimization—A universal approach》的python复现
## 环境依赖 ## 环境依赖
@ -13,42 +13,68 @@ pip install -r requirements.txt
``` ```
## Usage ## Usage
TODO: 用argparse模块管理网络参数 TODO: [done] 用argparse模块管理网络参数
### Train ### Train
`python train.py` ```
> 编辑 `train.py``data_mod` 变量选择数据 python train.py
# --mod [mod1 mod2 mod3] 参数选择训练数据,默认mod1
# e.g. python train.py --mod mod1
```
### Test ### Test
`python test.py` ```
> 编辑 `test.py``dataload_mod``pretrained_mod` 变量选择加载数据和预训练模型 python test.py
# --mod [mod1 mod2 mod3] 参数选择测试数据,默认mod3
# --pretrained_model_path <xxx_opt.pt> 选择预训练模型,默认./checkpoints/ANN_mod1/ANN_mod1_opt.pt
# e.g. python test.py --mod mod3 --pretrained_model_path ./checkpoints/ANN_mod1/ANN_mod1_opt.pt
```
### TopOpt with EMsFEA net
```
python topopt_EMsFEA.py
```
## 数据集 ## 数据集
> 通过经典二维拓扑优化代码生成的三组形变、密度数据 > 通过经典二维拓扑优化代码生成的三组形变、密度数据
mod1: ![](datasets/top88_mod1_img_180_60.jpg) mod1: ![](doc/mod1.jpg)
mod2: ![](datasets/top88_mod2_img_180_60.jpg) mod2: ![](doc/mod2.jpg)
mod3: ![](datasets/top88_mod3_img_180_60.jpg) mod3: ![](doc/mod3.jpg)
## 项目结构 ## 项目结构
``` ```
. .
├── checkpoints |-- README.md
│ ├── ... |-- checkpoints
├── datasets | `-- ...
│ ├── ... |-- datasets
├── models | |-- train
│ ├── ANN.py | | `--resolution
│ ├── AutoEncoder.py | | |--u
│ └── CNN.py | | `--xPhys
├── README.md | |-- test
├── requirements.txt |-- models
├── test.py | |-- ANN.py
├── train.py | |-- AutoEncoder.py
└── utils | |-- CNN.py
├── data_loader.py | `-- __init__.py
└── data_standardizer.py |-- options
| |-- __init__.py
| |-- base_options.py
| |-- test_options.py
| `-- train_options.py
|-- requirements.txt
|-- results
|-- test.py
|-- topopt_EMsFEA.py
|-- train.py
|-- utils
| |-- data_loader.py
| |-- data_standardizer.py
| |-- topopt_88.py
| `-- utils.py
`-- visualization.ipynb
``` ```

BIN
checkpoints/ANN_mod1_opt.pt

Binary file not shown.

BIN
checkpoints/ANN_mod2_opt.pt

Binary file not shown.

BIN
checkpoints/ANN_mod3_opt.pt

Binary file not shown.

BIN
datasets/top88_mod1_u_180_60.npy

Binary file not shown.

BIN
datasets/top88_mod1_xPhys_180_60.npy

Binary file not shown.

BIN
datasets/top88_mod2_u_180_60.npy

Binary file not shown.

BIN
datasets/top88_mod2_xPhys_180_60.npy

Binary file not shown.

BIN
datasets/top88_mod3_u_180_60.npy

Binary file not shown.

BIN
datasets/top88_mod3_xPhys_180_60.npy

Binary file not shown.

0
datasets/top88_mod1_img_180_60.jpg → doc/mod1.jpg

Before

Width:  |  Height:  |  Size: 1.9 KiB

After

Width:  |  Height:  |  Size: 1.9 KiB

0
datasets/top88_mod2_img_180_60.jpg → doc/mod2.jpg

Before

Width:  |  Height:  |  Size: 2.0 KiB

After

Width:  |  Height:  |  Size: 2.0 KiB

0
datasets/top88_mod3_img_180_60.jpg → doc/mod3.jpg

Before

Width:  |  Height:  |  Size: 2.0 KiB

After

Width:  |  Height:  |  Size: 2.0 KiB

0
models/__init__.py

1548
my-FEA-net.ipynb

File diff suppressed because one or more lines are too long

0
options/__init__.py

110
options/base_options.py

@ -0,0 +1,110 @@
import argparse
import os
from utils import utils
import torch
class BaseOptions():
    """This class defines options used during both training and test time.

    It also implements several helper functions such as parsing, printing, and
    saving the options. Phase-specific options are added by the subclasses
    (TrainOptions / TestOptions), which also set ``self.isTrain``.
    """

    def __init__(self):
        """Reset the class; indicates the class hasn't been initialized."""
        self.initialized = False

    def initialize(self, parser):
        """Define the common options that are used in both training and test.

        Parameters:
            parser (argparse.ArgumentParser) -- parser to populate
        Returns:
            the same parser, with the shared arguments added
        """
        # basic parameters
        parser.add_argument('--dataroot', type=str, default='./datasets', help='root path to datasets')
        parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--gpu_id', type=str, default='0', help='gpu ids: e.g. 0, 1, ... . use -1 for CPU')
        parser.add_argument('--device', type=str, default='cuda:0', help='generate device with gpu_id and usable user device')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        parser.add_argument('--ms_ratio', type=int, default=5, help='multiscale ratio')
        # model parameters
        parser.add_argument('--model', type=str, default='ANN', help='chooses which model to use. [ANN | CNN | AutoEncoder]')
        # dataset parameters
        # parser.add_argument('--resolution', type=str, default='180_60', help='data resolution. nelx_nely here')
        parser.add_argument('--nelx', type=int, default=180, help='num of elements on x-axis')
        parser.add_argument('--nely', type=int, default=60, help='num of elements on y-axis')
        parser.add_argument('--nelz', type=int, default=1, help='num of elements on z-axis')
        parser.add_argument('--dimension', type=int, default=2, help='dimension of dataset models')
        # BUGFIX: this flag used ``type=bool``, and argparse turns ANY
        # non-empty string into True (``--is_standard False`` gave True).
        # ``store_true`` keeps the False default and makes the flag correct.
        parser.add_argument('--is_standard', action='store_true', help='whether need standardization or not')
        # additional parameters
        parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        parser.add_argument('--load_iter', type=int, default=0, help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
        parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
        parser.add_argument('--suffix', type=str, default='', help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
        # identify initialization timing
        self.initialized = True
        return parser

    def gather_options(self):
        """Initialize our parser with basic options (only once).

        Add additional model-specific and dataset-specific options.
        These options are defined in the <modify_commandline_options> function
        in model and dataset classes.
        """
        if not self.initialized:  # check if it has been initialized
            parser = argparse.ArgumentParser()  # customize help formatting with <formatter_class>
            parser = self.initialize(parser)
        else:
            # BUGFIX: on this path ``parser`` was previously undefined, so a
            # second call to gather_options() raised NameError. Reuse the
            # parser cached below.
            parser = self.parser
        # get the basic options
        opt, _ = parser.parse_known_args()
        # save and return the parser
        self.parser = parser
        return parser.parse_args()

    def print_options(self, opt):
        """Print and save options.

        It will print both current options and default values (if different).
        It will save options into a text file: [checkpoints_dir]/[model]_[mod]/[phase]_opt.txt
        """
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        print(message)
        # save to the disk
        # NOTE(review): opt.mod and opt.phase are defined by the subclasses
        # (TrainOptions / TestOptions), so print_options must only be called
        # on fully-parsed subclass options.
        expr_dir = os.path.join(opt.checkpoints_dir, opt.model + '_' + opt.mod)
        utils.mkdir(expr_dir)
        file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')

    def parse(self):
        """Parse our options, create checkpoints directory suffix, and set up gpu device."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain  # train or test
        # process opt.suffix
        if opt.suffix:
            suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
            opt.name = opt.name + suffix
        self.print_options(opt)
        # set device with gpu id.
        # BUGFIX: the comparison was ``opt.gpu_id == -1`` (int vs. str) and
        # was therefore always False, so ``--gpu_id -1`` never selected CPU.
        if opt.gpu_id == '-1':
            opt.device = 'cpu'
        else:
            opt.device = f'cuda:{opt.gpu_id}' if torch.cuda.is_available() else 'cpu'
        self.opt = opt
        return self.opt

19
options/test_options.py

@ -0,0 +1,19 @@
from .base_options import BaseOptions
class TestOptions(BaseOptions):
    """This class includes test options.

    It also includes shared options defined in BaseOptions.
    """

    def initialize(self, parser):
        """Add the test-only arguments on top of the shared ones."""
        parser = super().initialize(parser)  # define shared options
        # (flag, default, help) — all test-time options are plain strings
        test_args = [
            ('--results_dir', './results', 'saves results here.'),
            ('--phase', 'test', 'train, val, test, etc'),
            ('--mod', 'mod3', 'chooses which dataset model for test. mod1....'),
            ('--pretrained_model_path', './checkpoints/ANN_mod1/ANN_mod1_opt.pt', 'pretrained model file load path'),
        ]
        for flag, default, doc in test_args:
            parser.add_argument(flag, type=str, default=default, help=doc)
        # test scripts never run the training branch
        self.isTrain = False
        return parser

23
options/train_options.py

@ -0,0 +1,23 @@
from .base_options import BaseOptions
class TrainOptions(BaseOptions):
    """This class includes training options.

    It also includes shared options defined in BaseOptions.
    """

    def initialize(self, parser):
        """Add the training-only arguments on top of the shared ones."""
        parser = super().initialize(parser)
        # visdom and HTML visualization parameters
        # network saving and loading parameters
        parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        # training parameters
        parser.add_argument('--epochs', type=int, default=10000, help='number of epochs')
        parser.add_argument('--lr', type=float, default=0.001, help='initial learning rate for adam')
        # BUGFIX: default was 'mod2', but the README and train.py both
        # document mod1 as the default training dataset.
        parser.add_argument('--mod', type=str, default='mod1', help='chooses which dataset model for train. mod1....')
        self.isTrain = True
        return parser

51
test.py

@ -1,4 +1,6 @@
import os
import numpy as np import numpy as np
import matplotlib.pyplot as plt
import torch import torch
import torch.nn as nn import torch.nn as nn
@ -6,58 +8,53 @@ import torch.nn.functional as F
from utils.data_standardizer import standardization from utils.data_standardizer import standardization
from utils.data_loader import data_loader from utils.data_loader import data_loader
from options.test_options import TestOptions
import matplotlib.pyplot as plt
def test(model_load_path, X, standard = False, device = 0): def test(X, opt):
model = torch.load(model_load_path) # OLD: model_load_path, X, standard = False, device = 0
if opt.is_standard:
if standard:
X = standardization(X) X = standardization(X)
device = f'cuda:{device}' if torch.cuda.is_available() else 'cpu'
X = torch.from_numpy(X).type(torch.float32).to(device) X_test=torch.from_numpy(X).type(torch.float32).to(opt.device)
with torch.no_grad(): model = torch.load(opt.pretrained_model_path)
return model(X) return model(X_test)
if __name__=='__main__': if __name__=='__main__':
# Load datasets # Load parameters
# test data select: opt = TestOptions().parse()
dataload_mod='mod1' # opt: mod1 mod2 mod3
# pretrained model select:
pretrained_mod='mod1' # opt: mod1 mod2 mod3
dst_path='datasets/top88_'+ dataload_mod + '_xPhys_180_60.npy' # Load datasets, mod2 as default
U_path='datasets/top88_'+ dataload_mod + '_u_180_60.npy' global_density, global_displace, coarse_density, coarse_displace, fine_displace = data_loader(opt)
global_density, global_displace, coarse_density, coarse_displace, fine_displace = data_loader(dst_path, U_path)
X = np.hstack((coarse_density[:,:] , coarse_displace[:,:,0] , coarse_displace[:,:,1])) X = np.hstack((coarse_density[:,:] , coarse_displace[:,:,0] , coarse_displace[:,:,1]))
Y = fine_displace[:,:] Y = fine_displace[:,:]
# Predict
pred = test(X, opt)
# Set loss function # Set loss function
loss_function = nn.MSELoss() loss_function = nn.MSELoss()
# Predict
pred = test('checkpoints/ANN_' + pretrained_mod + '_opt.pt', X)
# Calculate loss # Calculate loss
pred_loss=[] pred_loss=[]
device = f'cuda:{0}' if torch.cuda.is_available() else 'cpu' Y_test = torch.from_numpy(Y).type(torch.float32).to(opt.device)
Y = torch.from_numpy(Y).type(torch.float32).to(device)
for i in range(pred.shape[0]): for i in range(pred.shape[0]):
pred_loss.append(loss_function(pred[i,:],Y[i,:]).item()) pred_loss.append(loss_function(pred[i,:],Y_test[i,:]).item())
print('Total loss: '+ str(loss_function(pred,Y).item())) print('Total loss: '+ str(loss_function(pred,Y_test).item()))
# Plot # Plot
plt.plot(range(pred.shape[0]),pred_loss) plt.plot(range(pred.shape[0]),pred_loss)
plt.ylabel('Loss') plt.ylabel('Loss')
plt.xlabel('Coarse mesh id') plt.xlabel('Coarse mesh id')
plt.title("Linear graph") plt.title("Linear graph")
plt.savefig(os.path.join(opt.results_dir, 'test_loss.png'))
plt.show() plt.show()
loss_metrix = np.asarray(pred_loss) loss_metrix = np.asarray(pred_loss)
loss_metrix = loss_metrix.reshape(int(60/5), int(180/5)) loss_metrix = loss_metrix.reshape(int(opt.nely/opt.ms_ratio), int(opt.nelx/opt.ms_ratio))
plt.matshow(loss_metrix) plt.matshow(loss_metrix)
plt.title("Show loss value in grid") plt.title("Show loss value in grid")
plt.savefig(os.path.join(opt.results_dir, 'test_loss_in_grid.png'))
plt.show() plt.show()

2
topopt_EMsFEA.py

@ -126,7 +126,7 @@ def top_EMsFEA(nelx,nely,volfrac,penal,rmin,ft,mod_idx,m):
# Solve coarse situation # Solve coarse situation
c_u[coarse_free,0]=spsolve(K,c_f[coarse_free,0]) c_u[coarse_free,0]=spsolve(K,c_f[coarse_free,0])
# Predict fine situation # Predict fine situation
u=pred_net(c_u,xPhys,c_nelx,c_nely,m,'checkpoints/ANN_mod1_opt.pt') u=pred_net(c_u,xPhys,c_nelx,c_nely,m,'checkpoints/ANN_mod1/ANN_mod1_opt.pt')
# print(f.shape, f) # print(f.shape, f)
# print(K.shape, K) # print(K.shape, K)

59
train.py

@ -1,85 +1,76 @@
import numpy as np
import time import time
import os
import numpy as np
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from utils.data_standardizer import standardization
from utils.data_loader import data_loader
import torch import torch
import torch.nn as nn import torch.nn as nn
import torch.nn.functional as F import torch.nn.functional as F
from models.ANN import ANN_Model from utils.data_standardizer import standardization
from utils.data_loader import data_loader
from options.train_options import TrainOptions
from models.ANN import ANN_Model
def train(X, Y, epochs=10000, mod='mod1', standard = False, device = 0): def train(X, Y, opt):
if standard: if opt.is_standard:
X = standardization(X) X = standardization(X)
Y = standardization(Y) Y = standardization(Y)
X_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=0.2,random_state=0) X_train=torch.from_numpy(X).type(torch.float32).to(opt.device)
Y_train=torch.from_numpy(Y).type(torch.float32).to(opt.device)
device = f'cuda:{device}' if torch.cuda.is_available() else 'cpu'
X_train=torch.from_numpy(X_train).type(torch.float32).to(device)
X_test=torch.from_numpy(X_test).type(torch.float32).to(device)
Y_train=torch.from_numpy(Y_train).type(torch.float32).to(device)
Y_test=torch.from_numpy(Y_test).type(torch.float32).to(device)
# Load net model # Load net model
torch.manual_seed(20) torch.manual_seed(20)
model = ANN_Model() model_name=opt.model+'_Model'
# model = CNN_Model() model = eval(model_name)() # ANN_Model() as default
# model = ENCODER_Model()
model.parameters model.parameters
model=model.to(device) model=model.to(opt.device)
print(model) print(model)
# Set loss function # Set loss function
loss_function = nn.MSELoss() loss_function = nn.MSELoss()
# MSE_loss=nn.MSELoss()
# BCE_loss=nn.BCELoss()
# Set adam optimizer # Set adam optimizer
optimizer=torch.optim.Adam(model.parameters(),lr=0.001) # ANN 学习率最好0.001 左右(无归一化) optimizer=torch.optim.Adam(model.parameters(),lr=opt.lr) # ANN 学习率最好0.001 左右(无归一化)
# Train # Train
start_time=time.time() start_time=time.time()
losses=[] losses=[]
for i in range(epochs): for i in range(opt.epochs):
pred = model.forward(X_train) pred = model.forward(X_train)
loss=loss_function(pred,Y_train) loss=loss_function(pred,Y_train)
# loss.requires_grad_(True) # loss.requires_grad_(True)
losses.append(loss.cpu().detach().numpy()) losses.append(loss.cpu().detach().numpy())
if i%(epochs/10)==1: if i%(opt.epochs/10)==1:
print("Epoch number: {} and the loss : {}".format(i,loss.item())) print("Epoch number: {} and the loss : {}".format(i,loss.item()))
optimizer.zero_grad() optimizer.zero_grad()
loss.backward() loss.backward()
optimizer.step() optimizer.step()
print(time.time()-start_time) print(time.time()-start_time)
torch.save(model, 'checkpoints/' + str(model).split('_')[0] + '_' + mod + '_' + 'opt.pt') # save trained model, mkdir opt has done in options/base_options.py
save_path=os.path.join(opt.checkpoints_dir, opt.model+'_'+opt.mod, opt.model+'_'+opt.mod+'_opt.pt')
torch.save(model, save_path)
return losses return losses
if __name__=='__main__': if __name__=='__main__':
# Load datasets # Load parameters
# train data select: opt = TrainOptions().parse()
data_mod='mod1' # opt: mod1 mod2 mod3
dst_path='datasets/top88_'+ data_mod + '_xPhys_180_60.npy' # Load datasets, mod1 as default
U_path='datasets/top88_'+ data_mod + '_u_180_60.npy' global_density, global_displace, coarse_density, coarse_displace, fine_displace = data_loader(opt)
global_density, global_displace, coarse_density, coarse_displace, fine_displace = data_loader(dst_path, U_path)
X = np.hstack((coarse_density[:,:] , coarse_displace[:,:,0] , coarse_displace[:,:,1])) X = np.hstack((coarse_density[:,:] , coarse_displace[:,:,0] , coarse_displace[:,:,1]))
Y = fine_displace[:,:] Y = fine_displace[:,:]
# Train # Train
losses = train(X, Y, epochs=10000, mod=data_mod) losses = train(X, Y, opt)
# plot loss # plot loss
plt.plot(range(10000),losses) plt.plot(range(opt.epochs),losses)
plt.ylabel('Loss') plt.ylabel('Loss')
plt.xlabel('Epoch') plt.xlabel('Epoch')
plt.show() plt.show()

30
utils/data_loader.py

@ -1,15 +1,20 @@
import numpy as np import numpy as np
import os
def data_loader(density_load_path, displace_load_path): def data_loader(opt):
# Load datasets # Load datasets
global_density = np.load(density_load_path)
global_displace = np.load(displace_load_path) # './datasets/train/180_60/u_OR_xPhys/mod1.npy' as default
global_displace = global_displace.reshape(181,61,2) density_load_path = os.path.join(opt.dataroot, opt.phase, str(opt.nelx)+'_'+str(opt.nely), 'xPhys', opt.mod+'.npy')
global_displace = np.dstack((global_displace[:,:,0].T, global_displace[:,:,1].T)) displace_load_path = os.path.join(opt.dataroot, opt.phase, str(opt.nelx)+'_'+str(opt.nely), 'u', opt.mod+'.npy')
print(global_displace.shape)
print(global_density.shape) global_density = np.load(density_load_path) # (nely , nelx)
global_displace = np.load(displace_load_path) # ( (nely+1)*(nelx+1)*2 , 1 )
m=5
global_displace = global_displace.reshape(opt.nelx+1, opt.nely+1, 2)
global_displace = np.dstack((global_displace[:,:,0].T, global_displace[:,:,1].T)) # -> ( (nelx+1), (nelx+1), 2 )
m=opt.ms_ratio
N=(m+1)**2 N=(m+1)**2
global_nely=global_density.shape[0] global_nely=global_density.shape[0]
global_nelx=global_density.shape[1] global_nelx=global_density.shape[1]
@ -44,7 +49,6 @@ def data_loader(density_load_path, displace_load_path):
return global_density, global_displace, coarse_density, coarse_displace, fine_displace return global_density, global_displace, coarse_density, coarse_displace, fine_displace
if __name__=='__main__': if __name__=='__main__':
dataload_mod='mod1' # opt: mod1 mod2 mod3 from options.train_options import TrainOptions
dst_path='datasets/top88_'+ dataload_mod + '_xPhys_180_60.npy' opt = TrainOptions().parse()
U_path='datasets/top88_'+ dataload_mod + '_u_180_60.npy' data_loader(opt)
data_loader(dst_path,U_path)

4
utils/topopt_88.py

@ -169,8 +169,8 @@ def oc(nelx,nely,x,volfrac,dc,dv,g):
if __name__ == "__main__": if __name__ == "__main__":
# Default input parameters # Default input parameters
mod_idx='mod4' mod_idx='mod4'
nelx=180 nelx=30
nely=60 nely=10
volfrac=0.4 volfrac=0.4
rmin=5.4 rmin=5.4
penal=3.0 penal=3.0

23
utils/utils.py

@ -0,0 +1,23 @@
import os
def mkdirs(paths):
    """Create empty directories if they don't exist.

    Parameters:
        paths (str | list | tuple | set) -- a single directory path, or a
            collection of directory paths
    """
    # Generalized from ``isinstance(paths, list) and not isinstance(paths, str)``:
    # a list is never a str, so the second check was dead; tuples/sets of
    # paths are now also accepted instead of being passed to makedirs whole.
    if isinstance(paths, (list, tuple, set)):
        for path in paths:
            mkdir(path)
    else:
        mkdir(paths)
def mkdir(path):
    """Create a single empty directory (and parents) if it doesn't exist.

    Parameters:
        path (str) -- a single directory path
    """
    # exist_ok=True avoids the check-then-create race of the previous
    # ``if not os.path.exists(path): os.makedirs(path)`` pattern, which could
    # raise FileExistsError if another process created the directory first.
    os.makedirs(path, exist_ok=True)

8
test.ipynb → visualization.ipynb

@ -864,14 +864,6 @@
"surf_plot(global_displace,nely+1,nelx+1,'v')\n", "surf_plot(global_displace,nely+1,nelx+1,'v')\n",
"surf_plot(global_displace,nely+1,nelx+1,'sqrt')\n" "surf_plot(global_displace,nely+1,nelx+1,'sqrt')\n"
] ]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b1bb17d6",
"metadata": {},
"outputs": [],
"source": []
} }
], ],
"metadata": { "metadata": {
Loading…
Cancel
Save