update datasets & pretrained models

resources
郑敬润, 1 year ago
commit 0f5af01adf
  1. checkpoints.zip (BIN)
  2. datasets.zip (BIN)
  3. doc/mod1.jpg (BIN)
  4. doc/mod2.jpg (BIN)
  5. doc/mod3.jpg (BIN)
  6. models/ANN.py (45)
  7. models/AutoEncoder.py (76)
  8. models/CNN.py (63)
  9. models/__init__.py (0)
  10. options/__init__.py (0)
  11. options/base_options.py (110)
  12. options/test_options.py (19)
  13. options/train_options.py (23)
  14. requirements.txt (4)
  15. test.py (60)
  16. topopt_EMsFEA.py (261)
  17. train.py (76)
  18. utils/data_loader.py (54)
  19. utils/data_standardizer.py (7)
  20. utils/topopt_88.py (185)
  21. utils/utils.py (23)
  22. visualization.ipynb (890)

BIN
checkpoints.zip

Binary file not shown.

BIN
datasets.zip

Binary file not shown.

BIN
doc/mod1.jpg

Binary file not shown.

Before: 1.9 KiB

BIN
doc/mod2.jpg

Binary file not shown.

Before: 2.0 KiB

BIN
doc/mod3.jpg

Binary file not shown.

Before: 2.0 KiB

45
models/ANN.py

@@ -1,45 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

class ANN_Model(nn.Module):
    def __init__(self, input_features=8, out_features=72):
        super().__init__()
        # input_features counts only the 8 corner-displacement dofs;
        # the full input row is 33 wide (25 densities + 8 displacements)
        self.fc1 = nn.Linear(input_features, 12)
        self.fc2 = nn.Linear(12, 16)
        self.fc3 = nn.Linear(16, 20)
        self.fc4 = nn.Linear(20, 25)
        self.fc5 = nn.Linear(50, 60)
        self.fc6 = nn.Linear(60, 70)
        self.fc7 = nn.Linear(70, 80)
        self.fc8 = nn.Linear(80, 90)
        self.fc9 = nn.Linear(90, 100)
        self.fc10 = nn.Linear(100, 90)
        self.fc11 = nn.Linear(90, 80)
        self.out = nn.Linear(80, out_features)

    def forward(self, x):
        density = x[:, :25].reshape(x.shape[0], 25)  # 5x5 element densities
        displace = x[:, 25:]                         # 4 corner nodes x (u, v)
        x = F.relu(self.fc1(displace))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = F.relu(self.fc4(x))
        x = torch.hstack((density, x))               # inject densities mid-network: 25 + 25 = 50
        x = F.relu(self.fc5(x))
        x = F.relu(self.fc6(x))
        x = F.relu(self.fc7(x))
        x = F.relu(self.fc8(x))
        x = F.relu(self.fc9(x))
        x = F.relu(self.fc10(x))
        x = F.relu(self.fc11(x))
        x = self.out(x)
        return x
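A quick shape check (a sketch with random inputs; the 33-column layout of 25 densities followed by 8 corner displacements is read off the forward pass above):

import torch
from models.ANN import ANN_Model

model = ANN_Model()
x = torch.randn(4, 33)   # batch of 4 coarse elements
y = model(x)
print(y.shape)           # torch.Size([4, 72]): (5+1)^2 fine nodes x (u, v)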

76
models/AutoEncoder.py

@@ -1,76 +0,0 @@
#### ENCODER_Model
import torch
import torch.nn as nn
import torch.nn.functional as F

class AutoEncoder_Model(nn.Module):
    def __init__(self, input_features=33, out_features=72):
        super().__init__()
        self.upsample25x = nn.Upsample(scale_factor=2.5, mode='bilinear')
        self.fc1 = nn.Linear(75, 100)
        self.fc2 = nn.Linear(100, 150)
        self.fc3 = nn.Linear(150, 200)
        self.fc4 = nn.Linear(200, 300)
        self.fc5 = nn.Linear(300, 400)
        self.fc6 = nn.Linear(400, 500)
        self.fc7 = nn.Linear(500, 576)
        self.fc8 = nn.Linear(576, 500)
        self.fc9 = nn.Linear(500, 400)
        self.fc10 = nn.Linear(400, 300)
        self.fc11 = nn.Linear(300, 200)
        self.fc12 = nn.Linear(200, 150)
        self.fc13 = nn.Linear(150, 100)
        self.fc14 = nn.Linear(100, 72)

    def forward(self, x):
        B = x.shape[0]
        density = x[:, :25]
        density = density.reshape(B, 1, 5, 5)  # B 1(C) 5 5
        u = x[:, 25:29].reshape(B, 1, 2, 2)    # B 1(C) 2 2
        v = x[:, 29:].reshape(B, 1, 2, 2)      # B 1(C) 2 2
        displace = torch.cat((u, v), 1)        # B 2(C) 2 2
        displace = self.upsample25x(displace)  # upsample -> B 2 5 5
        # Option 1: couple via element-wise multiplication
        # u = torch.mul(displace[:,0,:,:], density[:,0,:,:])
        # v = torch.mul(displace[:,1,:,:], density[:,0,:,:])
        # x = torch.stack((u, v), 1)  # B 2 5 5
        # x = x.reshape(B, 50)
        # Option 2: couple via convolution
        # self.conv55 = nn.Conv2d(3, 2, 3, padding=1)  # B 3 5 5 -> B 2 5 5
        # Option 3: concatenate directly (used here)
        x = torch.cat((displace, density), 1)
        x = x.reshape(B, 75)
        x = torch.autograd.Variable(x, requires_grad=True)  # note: Variable is deprecated in modern PyTorch
        # Encode
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = F.relu(self.fc4(x))
        x = F.relu(self.fc5(x))
        x = F.relu(self.fc6(x))
        x = F.relu(self.fc7(x))
        shape_func = x.clone()  # keep the 576-wide encoding (returned as shape_func)
        # Decode
        x = F.relu(self.fc8(x))
        x = F.relu(self.fc9(x))
        x = F.relu(self.fc10(x))
        x = F.relu(self.fc11(x))
        x = F.relu(self.fc12(x))
        x = F.relu(self.fc13(x))
        x = F.relu(self.fc14(x))
        return x, shape_func
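A minimal forward-pass sketch (assumed input layout as above: 25 densities, then the 4 corner u values, then the 4 corner v values); note the model returns both the reconstruction and the 576-wide encoding:

import torch
from models.AutoEncoder import AutoEncoder_Model

model = AutoEncoder_Model()
x = torch.randn(2, 33)
out, shape_func = model(x)
print(out.shape, shape_func.shape)  # torch.Size([2, 72]) torch.Size([2, 576])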

63
models/CNN.py

@@ -1,63 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

class CNN_Model(nn.Module):
    def __init__(self, input_features=8, out_features=72):
        super().__init__()
        self.upsample25x = nn.Upsample(scale_factor=2.5, mode='bilinear')
        # Convolutional coupling variant (unused; see the commented forward path):
        # self.conv55 = nn.Conv2d(3, 3, 3, padding=1)    # keep B 3 5 5
        # self.conv56 = nn.Conv2d(3, 3, 4, padding=2)    # B 3 5 5 -> B 3 6 6
        # self.conv66_1 = nn.Conv2d(3, 5, 3, padding=1)  # B 3 6 6 -> B 5 6 6
        # self.conv66_2 = nn.Conv2d(5, 8, 3, padding=1)  # B 5 6 6 -> B 8 6 6
        # self.conv66_3 = nn.Conv2d(8, 5, 3, padding=1)  # B 8 6 6 -> B 5 6 6
        # self.conv66_4 = nn.Conv2d(5, 2, 3, padding=1)  # B 5 6 6 -> B 2 6 6
        self.conv55 = nn.Conv2d(3, 2, 3, padding=1)      # B 3 5 5 -> B 2 5 5 (defined but unused below)
        self.fc1 = nn.Linear(50, 70)
        self.fc2 = nn.Linear(70, 90)
        self.fc3 = nn.Linear(90, 110)
        self.fc4 = nn.Linear(110, 130)
        self.fc5 = nn.Linear(130, 100)
        self.fc6 = nn.Linear(100, 80)
        self.fc7 = nn.Linear(80, 72)

    def forward(self, x):
        B = x.shape[0]
        density = x[:, :25]
        density = density.reshape(B, 1, 5, 5)    # B 1 5 5
        displace = x[:, 25:]
        displace = displace.reshape(B, 2, 2, 2)  # B 2 2 2(C)
        displace = displace.permute(0, 3, 1, 2)  # reorder tensor dims to B C W H
        displace = self.upsample25x(displace)    # upsample -> B 2 5 5
        # Convolutional coupling variant:
        # x = torch.cat((displace, density), 1)
        # x = F.relu(self.conv56(x))
        # x = F.relu(self.conv66_1(x))
        # x = F.relu(self.conv66_2(x))
        # x = F.relu(self.conv66_3(x))
        # x = F.relu(self.conv66_4(x))
        # x = x.permute(0, 2, 3, 1)
        # x = x.reshape(B, 72)
        # Element-wise coupling of displacement with density:
        u = torch.mul(displace[:, 0, :, :], density[:, 0, :, :]).reshape(B, 25)
        v = torch.mul(displace[:, 1, :, :], density[:, 0, :, :]).reshape(B, 25)
        x = torch.cat((u, v), 1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = F.relu(self.fc4(x))
        x = F.relu(self.fc5(x))
        x = F.relu(self.fc6(x))
        x = F.relu(self.fc7(x))
        return x
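The same smoke test applies here (a sketch with random inputs; real rows come from utils/data_loader.py and use the identical 33-column layout):

import torch
from models.CNN import CNN_Model

model = CNN_Model()
print(model(torch.randn(4, 33)).shape)  # torch.Size([4, 72])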

0
models/__init__.py

0
options/__init__.py

110
options/base_options.py

@@ -1,110 +0,0 @@
import argparse
import os
from utils import utils
import torch

class BaseOptions():
    """This class defines options used during both training and test time.

    It also implements several helper functions such as parsing, printing, and saving the options.
    It also gathers additional options defined in <modify_commandline_options> functions in both dataset class and model class.
    """

    def __init__(self):
        """Reset the class; indicates the class hasn't been initialized."""
        self.initialized = False

    def initialize(self, parser):
        """Define the common options that are used in both training and test."""
        # basic parameters
        parser.add_argument('--dataroot', type=str, default='./datasets', help='root path to datasets')
        parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--gpu_id', type=str, default='0', help='gpu id, e.g. 0, 1, ...; use -1 for CPU')
        parser.add_argument('--device', type=str, default='cuda:0', help='device derived from gpu_id and CUDA availability')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        parser.add_argument('--ms_ratio', type=int, default=5, help='multiscale ratio')
        # model parameters
        parser.add_argument('--model', type=str, default='ANN', help='chooses which model to use. [ANN | CNN | AutoEncoder]')
        # dataset parameters
        # parser.add_argument('--resolution', type=str, default='180_60', help='data resolution. nelx_nely here')
        parser.add_argument('--nelx', type=int, default=180, help='number of elements on the x-axis')
        parser.add_argument('--nely', type=int, default=60, help='number of elements on the y-axis')
        parser.add_argument('--nelz', type=int, default=1, help='number of elements on the z-axis')
        parser.add_argument('--dimension', type=int, default=2, help='dimension of dataset models')
        parser.add_argument('--is_standard', type=bool, default=False, help='whether standardization is needed (note: argparse type=bool treats any non-empty string as True)')
        # additional parameters
        parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use the latest cached model')
        parser.add_argument('--load_iter', type=int, default=0, help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
        parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
        parser.add_argument('--suffix', type=str, default='', help='customized suffix: opt.name = opt.name + suffix, e.g. {model}_{netG}_size{load_size}')
        # mark initialization as done
        self.initialized = True
        return parser

    def gather_options(self):
        """Initialize our parser with basic options (only once).

        Add additional model-specific and dataset-specific options.
        These options are defined in the <modify_commandline_options> function
        in model and dataset classes.
        """
        if not self.initialized:  # check if it has been initialized
            parser = argparse.ArgumentParser()  # customize help formatting with <formatter_class>
            parser = self.initialize(parser)
        # get the basic options
        opt, _ = parser.parse_known_args()
        # save and return the parser
        self.parser = parser
        return parser.parse_args()

    def print_options(self, opt):
        """Print and save options.

        It prints both the current options and their default values (if different).
        It saves the options into a text file: [checkpoints_dir]/[model]_[mod]/[phase]_opt.txt
        """
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        print(message)
        # save to the disk
        expr_dir = os.path.join(opt.checkpoints_dir, opt.model + '_' + opt.mod)
        utils.mkdir(expr_dir)
        file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')

    def parse(self):
        """Parse our options, create the checkpoints directory suffix, and set up the gpu device."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain  # train or test
        # process opt.suffix
        if opt.suffix:
            suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
            opt.name = opt.name + suffix
        self.print_options(opt)
        # set device from gpu id (gpu_id is parsed as a string, so compare against '-1')
        if opt.gpu_id == '-1':
            opt.device = 'cpu'
        else:
            opt.device = f'cuda:{opt.gpu_id}' if torch.cuda.is_available() else 'cpu'
        self.opt = opt
        return self.opt
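A usage sketch with a simulated command line (hypothetical argument values; the TrainOptions subclass below supplies the isTrain flag and the --mod option that print_options expects):

import sys
from options.train_options import TrainOptions

sys.argv = ['train.py', '--model', 'ANN', '--mod', 'mod1', '--gpu_id', '-1']  # simulated CLI
opt = TrainOptions().parse()
print(opt.device)  # 'cpu', because gpu_id == '-1'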

19
options/test_options.py

@@ -1,19 +0,0 @@
from .base_options import BaseOptions

class TestOptions(BaseOptions):
    """This class includes test options.

    It also includes shared options defined in BaseOptions.
    """

    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)  # define shared options
        parser.add_argument('--results_dir', type=str, default='./results', help='saves results here.')
        parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
        parser.add_argument('--mod', type=str, default='mod3', help='chooses which dataset model to test. mod1, ...')
        parser.add_argument('--pretrained_model_path', type=str, default='./checkpoints/ANN_mod1/ANN_mod1_opt.pt', help='pretrained model file load path')
        self.isTrain = False
        return parser

23
options/train_options.py

@@ -1,23 +0,0 @@
from .base_options import BaseOptions

class TrainOptions(BaseOptions):
    """This class includes training options.

    It also includes shared options defined in BaseOptions.
    """

    def initialize(self, parser):
        parser = BaseOptions.initialize(self, parser)
        # visdom and HTML visualization parameters
        # network saving and loading parameters
        parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        # training parameters
        parser.add_argument('--epochs', type=int, default=10000, help='number of epochs')
        parser.add_argument('--lr', type=float, default=0.001, help='initial learning rate for adam')
        parser.add_argument('--mod', type=str, default='mod2', help='chooses which dataset model to train on. mod1, ...')
        self.isTrain = True
        return parser

4
requirements.txt

@@ -1,4 +0,0 @@
matplotlib==3.8.0
numpy==1.26.2
scikit_learn==1.3.0
torch==2.1.0

60
test.py

@@ -1,60 +0,0 @@
import os
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.data_standardizer import standardization
from utils.data_loader import data_loader
from options.test_options import TestOptions

def test(X, opt):
    # OLD signature: model_load_path, X, standard=False, device=0
    if opt.is_standard:
        X = standardization(X)
    X_test = torch.from_numpy(X).type(torch.float32).to(opt.device)
    model = torch.load(opt.pretrained_model_path)
    return model(X_test)

if __name__ == '__main__':
    # Load parameters
    opt = TestOptions().parse()
    # Load datasets, mod3 as default
    global_density, global_displace, coarse_density, coarse_displace, fine_displace = data_loader(opt)
    X = np.hstack((coarse_density[:, :], coarse_displace[:, :, 0], coarse_displace[:, :, 1]))
    Y = fine_displace[:, :]
    # Predict
    pred = test(X, opt)
    # Set loss function
    loss_function = nn.MSELoss()
    # Calculate per-sample loss
    pred_loss = []
    Y_test = torch.from_numpy(Y).type(torch.float32).to(opt.device)
    for i in range(pred.shape[0]):
        pred_loss.append(loss_function(pred[i, :], Y_test[i, :]).item())
    print('Total loss: ' + str(loss_function(pred, Y_test).item()))
    # Plot
    os.makedirs(opt.results_dir, exist_ok=True)  # ensure the results directory exists
    plt.plot(range(pred.shape[0]), pred_loss)
    plt.ylabel('Loss')
    plt.xlabel('Coarse mesh id')
    plt.title('Per-element test loss')
    plt.savefig(os.path.join(opt.results_dir, 'test_loss.png'))
    plt.show()
    loss_matrix = np.asarray(pred_loss)
    loss_matrix = loss_matrix.reshape(int(opt.nely / opt.ms_ratio), int(opt.nelx / opt.ms_ratio))
    plt.matshow(loss_matrix)
    plt.title('Per-element loss on the coarse grid')
    plt.savefig(os.path.join(opt.results_dir, 'test_loss_in_grid.png'))
    plt.show()
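The per-sample loop above can also be written as a single vectorized reduction (a sketch; same numbers as the loop, minus the Python-loop overhead):

per_sample = ((pred - Y_test) ** 2).mean(dim=1)   # shape: (n_coarse_elements,)
pred_loss = per_sample.detach().cpu().numpy().tolist()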

261
topopt_EMsFEA.py

@@ -1,261 +0,0 @@
from __future__ import division
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import spsolve
from matplotlib import colors
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.data_standardizer import standardization
from utils.data_loader import data_loader

def top_EMsFEA(nelx, nely, volfrac, penal, rmin, ft, mod_idx, m):
    print("Minimum compliance problem with OC")
    print("ndes: " + str(nelx) + " x " + str(nely))
    print("volfrac: " + str(volfrac) + ", rmin: " + str(rmin) + ", penal: " + str(penal))
    print("Filter method: " + ["Sensitivity based", "Density based"][ft])
    # Max and min stiffness
    Emin = 1e-9
    Emax = 1.0
    c_nelx = int(nelx / m)
    c_nely = int(nely / m)
    # dofs:
    ndof = 2 * (nelx + 1) * (nely + 1)
    coarse_ndof = 2 * (c_nelx + 1) * (c_nely + 1)
    # Allocate design variables (as array), initialize and allocate sens.
    x = volfrac * np.ones(nely * nelx, dtype=float)
    xold = x.copy()
    xPhys = x.copy()
    g = 0  # must be initialized to use the NGuyen/Paulino OC approach
    dc = np.zeros((nely, nelx), dtype=float)
    # FE: Build the index vectors for the coo matrix format.
    KE = lk()
    edofMat = np.zeros((nelx * nely, 8), dtype=int)
    for elx in range(nelx):
        for ely in range(nely):
            el = ely + elx * nely
            n1 = (nely + 1) * elx + ely
            n2 = (nely + 1) * (elx + 1) + ely
            edofMat[el, :] = np.array([2*n1+2, 2*n1+3, 2*n2+2, 2*n2+3, 2*n2, 2*n2+1, 2*n1, 2*n1+1])
    # Construct the index pointers for the coo format
    iK = np.kron(edofMat, np.ones((8, 1))).flatten()
    jK = np.kron(edofMat, np.ones((1, 8))).flatten()
    coarse_edofMat = np.zeros((c_nelx * c_nely, 8), dtype=int)
    for elx in range(c_nelx):
        for ely in range(c_nely):
            el = ely + elx * c_nely
            n1 = (c_nely + 1) * elx + ely
            n2 = (c_nely + 1) * (elx + 1) + ely
            coarse_edofMat[el, :] = np.array([2*n1+2, 2*n1+3, 2*n2+2, 2*n2+3, 2*n2, 2*n2+1, 2*n1, 2*n1+1])
    # Construct the index pointers for the coo format
    coarse_iK = np.kron(coarse_edofMat, np.ones((8, 1))).flatten()
    coarse_jK = np.kron(coarse_edofMat, np.ones((1, 8))).flatten()
    # Filter: Build (and assemble) the index+data vectors for the coo matrix format
    nfilter = int(nelx * nely * ((2 * (np.ceil(rmin) - 1) + 1) ** 2))
    iH = np.zeros(nfilter)
    jH = np.zeros(nfilter)
    sH = np.zeros(nfilter)
    cc = 0
    for i in range(nelx):
        for j in range(nely):
            row = i * nely + j
            kk1 = int(np.maximum(i - (np.ceil(rmin) - 1), 0))
            kk2 = int(np.minimum(i + np.ceil(rmin), nelx))
            ll1 = int(np.maximum(j - (np.ceil(rmin) - 1), 0))
            ll2 = int(np.minimum(j + np.ceil(rmin), nely))
            for k in range(kk1, kk2):
                for l in range(ll1, ll2):
                    col = k * nely + l
                    fac = rmin - np.sqrt((i - k) * (i - k) + (j - l) * (j - l))
                    iH[cc] = row
                    jH[cc] = col
                    sH[cc] = np.maximum(0.0, fac)
                    cc = cc + 1
    # Finalize assembly and convert to csc format
    H = coo_matrix((sH, (iH, jH)), shape=(nelx * nely, nelx * nely)).tocsc()
    Hs = H.sum(1)
    # BC's and support
    # dofs = np.arange(2 * (nelx + 1) * (nely + 1))
    # fixed = np.union1d(dofs[0:2 * (nely + 1):2], np.array([2 * (nelx + 1) * (nely + 1) - 1]))
    # free = np.setdiff1d(dofs, fixed)
    coarse_dofs = np.arange(2 * (c_nelx + 1) * (c_nely + 1))
    coarse_fixed = np.union1d(coarse_dofs[0:2 * (c_nely + 1):2], np.array([2 * (c_nelx + 1) * (c_nely + 1) - 1]))
    coarse_free = np.setdiff1d(coarse_dofs, coarse_fixed)
    # Solution and RHS vectors
    # f = np.zeros((ndof, 1))
    # u = np.zeros((ndof, 1))
    c_f = np.zeros((coarse_ndof, 1))
    c_u = np.zeros((coarse_ndof, 1))
    # Set load
    # f[1, 0] = -1
    c_f[1, 0] = -1
    # Initialize plot and plot the initial design
    plt.ion()  # Ensure that redrawing is possible
    fig, ax = plt.subplots()
    im = ax.imshow(-xPhys.reshape((nelx, nely)).T, cmap='gray',
                   interpolation='none', norm=colors.Normalize(vmin=-1, vmax=0))
    fig.show()
    # Set loop counter and gradient vectors
    loop = 0
    change = 1
    dv = np.ones(nely * nelx)
    dc = np.ones(nely * nelx)
    ce = np.ones(nely * nelx)
    while change > 0.01 and loop < 2000:
        loop = loop + 1
        # Setup and solve the coarse FE problem
        coarse_xPhys = xPhys[12::25]  # every 25th fine density: one value per coarse element
        sK = ((KE.flatten()[np.newaxis]).T * (Emin + coarse_xPhys ** penal * (Emax - Emin))).flatten(order='F')
        K = coo_matrix((sK, (coarse_iK, coarse_jK)), shape=(coarse_ndof, coarse_ndof)).tocsc()
        # Remove constrained dofs from matrix
        K = K[coarse_free, :][:, coarse_free]
        # Solve the coarse system
        c_u[coarse_free, 0] = spsolve(K, c_f[coarse_free, 0])
        # Predict the fine displacement field from the coarse solution
        u = pred_net(c_u, xPhys, c_nelx, c_nely, m, 'checkpoints/ANN_mod1/ANN_mod1_opt.pt')
        # Objective and sensitivity
        ce[:] = (np.dot(u[edofMat].reshape(nelx * nely, 8), KE) * u[edofMat].reshape(nelx * nely, 8)).sum(1)
        obj = ((Emin + xPhys ** penal * (Emax - Emin)) * ce).sum()
        dc[:] = (-penal * xPhys ** (penal - 1) * (Emax - Emin)) * ce
        dv[:] = np.ones(nely * nelx)
        # Sensitivity filtering:
        if ft == 0:
            dc[:] = np.asarray((H * (x * dc))[np.newaxis].T / Hs)[:, 0] / np.maximum(0.001, x)
        elif ft == 1:
            dc[:] = np.asarray(H * (dc[np.newaxis].T / Hs))[:, 0]
            dv[:] = np.asarray(H * (dv[np.newaxis].T / Hs))[:, 0]
        # Optimality criteria
        xold[:] = x
        (x[:], g) = oc(nelx, nely, x, volfrac, dc, dv, g)
        # Filter design variables
        if ft == 0:
            xPhys[:] = x
        elif ft == 1:
            xPhys[:] = np.asarray(H * x[np.newaxis].T / Hs)[:, 0]
        # Compute the change by the inf. norm
        change = np.linalg.norm(x.reshape(nelx * nely, 1) - xold.reshape(nelx * nely, 1), np.inf)
        # Plot to screen
        im.set_array(-xPhys.reshape((nelx, nely)).T)
        fig.canvas.draw()
        # Write iteration history to screen
        print("it.: {0} , obj.: {1:.3f} Vol.: {2:.3f}, ch.: {3:.3f}".format(
            loop, obj, (g + volfrac * nelx * nely) / (nelx * nely), change))
        np.save('results/top88_' + mod_idx + '_xPhys_' + str(nelx) + '_' + str(nely) + '.npy', xPhys.reshape((nelx, nely)).T)
        np.save('results/top88_' + mod_idx + '_u_' + str(nelx) + '_' + str(nely) + '.npy', u)
        plt.savefig('results/top88_' + mod_idx + '_img_' + str(nelx) + '_' + str(nely) + '.jpg')
        # plt.show()
    print(u.reshape(nelx + 1, nely + 1, 2))
    # Save the final design; make sure the plot stays and that the shell remains
    np.save('results/top88_' + mod_idx + '_xPhys_' + str(nelx) + '_' + str(nely) + '.npy', xPhys.reshape((nelx, nely)).T)
    np.save('results/top88_' + mod_idx + '_u_' + str(nelx) + '_' + str(nely) + '.npy', u)
    plt.savefig('results/top88_' + mod_idx + '_img_' + str(nelx) + '_' + str(nely) + '.jpg')
    plt.show()
    print("Press any key...")

# Element stiffness matrix
def lk():
    E = 1
    nu = 0.3
    k = np.array([1/2 - nu/6, 1/8 + nu/8, -1/4 - nu/12, -1/8 + 3*nu/8,
                  -1/4 + nu/12, -1/8 - nu/8, nu/6, 1/8 - 3*nu/8])
    KE = E / (1 - nu**2) * np.array([
        [k[0], k[1], k[2], k[3], k[4], k[5], k[6], k[7]],
        [k[1], k[0], k[7], k[6], k[5], k[4], k[3], k[2]],
        [k[2], k[7], k[0], k[5], k[6], k[3], k[4], k[1]],
        [k[3], k[6], k[5], k[0], k[7], k[2], k[1], k[4]],
        [k[4], k[5], k[6], k[7], k[0], k[1], k[2], k[3]],
        [k[5], k[4], k[3], k[2], k[1], k[0], k[7], k[6]],
        [k[6], k[3], k[4], k[1], k[2], k[7], k[0], k[5]],
        [k[7], k[2], k[1], k[4], k[3], k[6], k[5], k[0]]])
    return KE

# Optimality criterion
def oc(nelx, nely, x, volfrac, dc, dv, g):
    l1 = 0
    l2 = 1e9
    move = 0.2
    # reshape to perform vector operations
    xnew = np.zeros(nelx * nely)
    while (l2 - l1) / (l1 + l2) > 1e-3:
        lmid = 0.5 * (l2 + l1)
        xnew[:] = np.maximum(0.0, np.maximum(x - move, np.minimum(1.0, np.minimum(x + move, x * np.sqrt(-dc / dv / lmid)))))
        gt = g + np.sum(dv * (xnew - x))
        if gt > 0:
            l1 = lmid
        else:
            l2 = lmid
    return (xnew, gt)

def pred_net(coarse_u, fine_x, coarse_nelx, coarse_nely, m, model_load_path, standard=False, device=0):
    # Gather the m x m fine-density patch of every coarse element
    coarse_density = np.zeros(shape=(coarse_nely * coarse_nelx, m * m))
    fine_x = fine_x.reshape(coarse_nely * m, coarse_nelx * m)
    for ely in range(coarse_nely):
        for elx in range(coarse_nelx):
            el = elx + ely * coarse_nelx  # row-major coarse element index (was elx + ely * m, an indexing bug)
            coarse_density[el] = fine_x[ely * m:(ely + 1) * m, elx * m:(elx + 1) * m].flatten()
    print(coarse_density.shape)
    global_displace = coarse_u.reshape(coarse_nelx + 1, coarse_nely + 1, 2)
    global_displace = np.dstack((global_displace[:, :, 0].T, global_displace[:, :, 1].T))
    # Gather the four corner displacements of every coarse element
    coarse_displace = np.zeros(shape=(coarse_nely * coarse_nelx, 4, 2))
    for ely in range(coarse_nely):
        for elx in range(coarse_nelx):
            el = elx + ely * coarse_nelx
            coarse_displace[el][0] = global_displace[ely, elx, :]
            coarse_displace[el][1] = global_displace[ely, elx + 1, :]
            coarse_displace[el][2] = global_displace[ely + 1, elx, :]
            coarse_displace[el][3] = global_displace[ely + 1, elx + 1, :]
    print(coarse_displace.shape)
    X = np.hstack((coarse_density[:, :], coarse_displace[:, :, 0], coarse_displace[:, :, 1]))
    model = torch.load(model_load_path)
    if standard:
        X = standardization(X)
    device = f'cuda:{device}' if torch.cuda.is_available() else 'cpu'
    X = torch.from_numpy(X).type(torch.float32).to(device)
    pred = model(X).cpu().detach().numpy()
    # Stitch the per-element fine predictions back into one global field
    u_reconstructed = np.zeros(shape=(coarse_nely * m + 1, coarse_nelx * m + 1, 2))
    for ely in range(coarse_nely):
        for elx in range(coarse_nelx):
            u_reconstructed[ely * m:(ely + 1) * m + 1, elx * m:(elx + 1) * m + 1, :] = pred[elx + ely * coarse_nelx].reshape((m + 1, m + 1, 2))
    u_reconstructed = np.dstack((u_reconstructed[:, :, 0].T, u_reconstructed[:, :, 1].T))
    u_reconstructed = u_reconstructed.flatten()
    return u_reconstructed

# The real main driver
if __name__ == "__main__":
    mod_idx = 'test1'
    m = 5
    nelx = 180
    nely = 60
    volfrac = 0.4
    rmin = 5.4
    penal = 3.0
    ft = 1  # ft==0 -> sensitivity filter, ft==1 -> density filter
    top_EMsFEA(nelx, nely, volfrac, penal, rmin, ft, mod_idx, m)
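For reference, the `xPhys[12::25]` slice in the loop takes every 25th fine density starting at offset 12 (the stride 25 = m*m is hard-coded for m = 5); if the densities were grouped as flattened 5x5 patches, offset 12 would be each patch's center cell. A small sketch of that offset arithmetic:

import numpy as np

m = 5
patch = np.arange(m * m).reshape(m, m)      # a flattened 5x5 density patch
print(patch.flatten()[12] == patch[2, 2])   # True: offset 12 is the center cell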

76
train.py

@@ -1,76 +0,0 @@
import time
import os
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.data_standardizer import standardization
from utils.data_loader import data_loader
from options.train_options import TrainOptions
from models.ANN import ANN_Model

def train(X, Y, opt):
    if opt.is_standard:
        X = standardization(X)
        Y = standardization(Y)
    X_train = torch.from_numpy(X).type(torch.float32).to(opt.device)
    Y_train = torch.from_numpy(Y).type(torch.float32).to(opt.device)
    # Load net model
    torch.manual_seed(20)
    model_name = opt.model + '_Model'
    model = eval(model_name)()  # ANN_Model() as default; the class must be imported above
    model = model.to(opt.device)
    print(model)
    # Set loss function
    loss_function = nn.MSELoss()
    # Set adam optimizer
    optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr)  # for the ANN, a learning rate around 0.001 works best (without normalization)
    # Train
    start_time = time.time()
    losses = []
    for i in range(opt.epochs):
        pred = model.forward(X_train)
        loss = loss_function(pred, Y_train)
        # loss.requires_grad_(True)
        losses.append(loss.cpu().detach().numpy())
        if i % (opt.epochs // 10) == 1:
            print("Epoch number: {} and the loss : {}".format(i, loss.item()))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print('Training time: ' + str(time.time() - start_time) + 's')
    # Save the trained model; the checkpoints directory was created in options/base_options.py
    save_path = os.path.join(opt.checkpoints_dir, opt.model + '_' + opt.mod, opt.model + '_' + opt.mod + '_opt.pt')
    torch.save(model, save_path)
    return losses

if __name__ == '__main__':
    # Load parameters
    opt = TrainOptions().parse()
    # Load datasets, mod2 as default
    global_density, global_displace, coarse_density, coarse_displace, fine_displace = data_loader(opt)
    X = np.hstack((coarse_density[:, :], coarse_displace[:, :, 0], coarse_displace[:, :, 1]))
    Y = fine_displace[:, :]
    # Train
    losses = train(X, Y, opt)
    # Plot the loss curve
    plt.plot(range(opt.epochs), losses)
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.show()
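Since torch.save is given the whole module rather than a state_dict, reloading unpickles the original class; a sketch of inference with the saved checkpoint (assuming the default ANN_mod1 path from the options above):

import torch
from models.ANN import ANN_Model  # the class must be importable for unpickling

model = torch.load('checkpoints/ANN_mod1/ANN_mod1_opt.pt', map_location='cpu')
model.eval()
with torch.no_grad():
    pred = model(torch.randn(1, 33))  # one coarse element -> 72 fine displacement values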

54
utils/data_loader.py

@@ -1,54 +0,0 @@
import numpy as np
import os

def data_loader(opt):
    # Load datasets
    # './datasets/train/180_60/u_OR_xPhys/mod1.npy' as default
    density_load_path = os.path.join(opt.dataroot, opt.phase, str(opt.nelx) + '_' + str(opt.nely), 'xPhys', opt.mod + '.npy')
    displace_load_path = os.path.join(opt.dataroot, opt.phase, str(opt.nelx) + '_' + str(opt.nely), 'u', opt.mod + '.npy')
    global_density = np.load(density_load_path)    # (nely, nelx)
    global_displace = np.load(displace_load_path)  # ((nely+1)*(nelx+1)*2, 1)
    global_displace = global_displace.reshape(opt.nelx + 1, opt.nely + 1, 2)
    global_displace = np.dstack((global_displace[:, :, 0].T, global_displace[:, :, 1].T))  # -> ((nely+1), (nelx+1), 2)
    m = opt.ms_ratio
    N = (m + 1) ** 2
    global_nely = global_density.shape[0]
    global_nelx = global_density.shape[1]
    coarse_nely = int(global_nely / m)
    coarse_nelx = int(global_nelx / m)
    # Generate coarse mesh density: one flattened m x m patch per coarse element
    coarse_density = np.zeros(shape=(coarse_nely * coarse_nelx, m * m))
    for ely in range(coarse_nely):
        for elx in range(coarse_nelx):
            el = elx + ely * coarse_nelx  # row-major coarse element index (was elx + ely * m, an indexing bug)
            coarse_density[el] = global_density[ely * m:(ely + 1) * m, elx * m:(elx + 1) * m].flatten()
    print(coarse_density.shape)
    # Generate coarse mesh displacement: the four corner nodes of each coarse element
    coarse_displace = np.zeros(shape=(coarse_nely * coarse_nelx, 4, 2))
    for ely in range(coarse_nely):
        for elx in range(coarse_nelx):
            el = elx + ely * coarse_nelx
            coarse_displace[el][0] = global_displace[ely * m, elx * m, :]
            coarse_displace[el][1] = global_displace[ely * m, (elx + 1) * m, :]
            coarse_displace[el][2] = global_displace[(ely + 1) * m, elx * m, :]
            coarse_displace[el][3] = global_displace[(ely + 1) * m, (elx + 1) * m, :]
    print(coarse_displace.shape)
    # Generate fine mesh displacement: the (m+1)^2 node displacements inside each coarse element
    fine_displace = np.zeros(shape=(coarse_nely * coarse_nelx, ((m + 1) ** 2) * 2))
    for ely in range(coarse_nely):
        for elx in range(coarse_nelx):
            fine_displace[elx + ely * coarse_nelx] = global_displace[ely * m:(ely + 1) * m + 1, elx * m:(elx + 1) * m + 1, :].flatten()
    print(fine_displace.shape)
    return global_density, global_displace, coarse_density, coarse_displace, fine_displace

if __name__ == '__main__':
    from options.train_options import TrainOptions
    opt = TrainOptions().parse()
    data_loader(opt)
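With the default options (nelx=180, nely=60, ms_ratio=5) the coarse grid is 36 x 12 = 432 elements, so the returned arrays should have the shapes below (a sketch; it assumes datasets.zip is unpacked into ./datasets so the default mod2 train arrays exist):

from options.train_options import TrainOptions
from utils.data_loader import data_loader

opt = TrainOptions().parse()
_, _, coarse_density, coarse_displace, fine_displace = data_loader(opt)
assert coarse_density.shape == (432, 25)     # 5x5 densities per coarse element
assert coarse_displace.shape == (432, 4, 2)  # 4 corner nodes x (u, v)
assert fine_displace.shape == (432, 72)      # 6x6 fine nodes x (u, v)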

7
utils/data_standardizer.py

@@ -1,7 +0,0 @@
import numpy as np

def standardization(data):
    """Column-wise z-score: zero mean and unit variance per feature."""
    mu = np.mean(data, axis=0)
    sigma = np.std(data, axis=0)
    return (data - mu) / sigma
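A small usage check (note the column-wise behavior; a constant column would divide by zero, which the caller has to avoid):

import numpy as np
from utils.data_standardizer import standardization

data = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
out = standardization(data)
print(np.allclose(out.mean(axis=0), 0.0), np.allclose(out.std(axis=0), 1.0))  # True True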

185
utils/topopt_88.py

@@ -1,185 +0,0 @@
# A 165 LINE TOPOLOGY OPTIMIZATION CODE BY NIELS AAGE AND VILLADS EGEDE JOHANSEN, JANUARY 2013
from __future__ import division
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import spsolve
from matplotlib import colors
import matplotlib.pyplot as plt

# MAIN DRIVER
def top88(nelx, nely, volfrac, penal, rmin, ft, mod_idx):
    print("Minimum compliance problem with OC")
    print("ndes: " + str(nelx) + " x " + str(nely))
    print("volfrac: " + str(volfrac) + ", rmin: " + str(rmin) + ", penal: " + str(penal))
    print("Filter method: " + ["Sensitivity based", "Density based"][ft])
    # Max and min stiffness
    Emin = 1e-9
    Emax = 1.0
    # dofs:
    ndof = 2 * (nelx + 1) * (nely + 1)
    # Allocate design variables (as array), initialize and allocate sens.
    x = volfrac * np.ones(nely * nelx, dtype=float)
    xold = x.copy()
    xPhys = x.copy()
    g = 0  # must be initialized to use the NGuyen/Paulino OC approach
    dc = np.zeros((nely, nelx), dtype=float)
    # FE: Build the index vectors for the coo matrix format.
    KE = lk()
    edofMat = np.zeros((nelx * nely, 8), dtype=int)
    for elx in range(nelx):
        for ely in range(nely):
            el = ely + elx * nely
            n1 = (nely + 1) * elx + ely
            n2 = (nely + 1) * (elx + 1) + ely
            edofMat[el, :] = np.array([2*n1+2, 2*n1+3, 2*n2+2, 2*n2+3, 2*n2, 2*n2+1, 2*n1, 2*n1+1])
    # Construct the index pointers for the coo format
    iK = np.kron(edofMat, np.ones((8, 1))).flatten()
    jK = np.kron(edofMat, np.ones((1, 8))).flatten()
    # Filter: Build (and assemble) the index+data vectors for the coo matrix format
    nfilter = int(nelx * nely * ((2 * (np.ceil(rmin) - 1) + 1) ** 2))
    iH = np.zeros(nfilter)
    jH = np.zeros(nfilter)
    sH = np.zeros(nfilter)
    cc = 0
    for i in range(nelx):
        for j in range(nely):
            row = i * nely + j
            kk1 = int(np.maximum(i - (np.ceil(rmin) - 1), 0))
            kk2 = int(np.minimum(i + np.ceil(rmin), nelx))
            ll1 = int(np.maximum(j - (np.ceil(rmin) - 1), 0))
            ll2 = int(np.minimum(j + np.ceil(rmin), nely))
            for k in range(kk1, kk2):
                for l in range(ll1, ll2):
                    col = k * nely + l
                    fac = rmin - np.sqrt((i - k) * (i - k) + (j - l) * (j - l))
                    iH[cc] = row
                    jH[cc] = col
                    sH[cc] = np.maximum(0.0, fac)
                    cc = cc + 1
    # Finalize assembly and convert to csc format
    H = coo_matrix((sH, (iH, jH)), shape=(nelx * nely, nelx * nely)).tocsc()
    Hs = H.sum(1)
    # BC's and support
    dofs = np.arange(2 * (nelx + 1) * (nely + 1))
    fixed = np.union1d(dofs[0:2 * (nely + 1):2], np.array([2 * (nelx + 1) * (nely + 1) - 1]))
    free = np.setdiff1d(dofs, fixed)
    # Solution and RHS vectors
    f = np.zeros((ndof, 1))
    u = np.zeros((ndof, 1))
    # Set load
    f[1, 0] = -1
    # Initialize plot and plot the initial design
    plt.ion()  # Ensure that redrawing is possible
    fig, ax = plt.subplots()
    im = ax.imshow(-xPhys.reshape((nelx, nely)).T, cmap='gray',
                   interpolation='none', norm=colors.Normalize(vmin=-1, vmax=0))
    fig.show()
    # Set loop counter and gradient vectors
    loop = 0
    change = 1
    dv = np.ones(nely * nelx)
    dc = np.ones(nely * nelx)
    ce = np.ones(nely * nelx)
    while change > 0.01 and loop < 2000:
        loop = loop + 1
        # Setup and solve FE problem
        sK = ((KE.flatten()[np.newaxis]).T * (Emin + xPhys ** penal * (Emax - Emin))).flatten(order='F')
        K = coo_matrix((sK, (iK, jK)), shape=(ndof, ndof)).tocsc()
        # Remove constrained dofs from matrix
        K = K[free, :][:, free]
        # Solve system
        u[free, 0] = spsolve(K, f[free, 0])
        # print(f.shape, f)
        # print(K.shape, K)
        # print(f[free, 0])
        # print(u.shape, u)
        # Objective and sensitivity
        ce[:] = (np.dot(u[edofMat].reshape(nelx * nely, 8), KE) * u[edofMat].reshape(nelx * nely, 8)).sum(1)
        obj = ((Emin + xPhys ** penal * (Emax - Emin)) * ce).sum()
        dc[:] = (-penal * xPhys ** (penal - 1) * (Emax - Emin)) * ce
        dv[:] = np.ones(nely * nelx)
        # Sensitivity filtering:
        if ft == 0:
            dc[:] = np.asarray((H * (x * dc))[np.newaxis].T / Hs)[:, 0] / np.maximum(0.001, x)
        elif ft == 1:
            dc[:] = np.asarray(H * (dc[np.newaxis].T / Hs))[:, 0]
            dv[:] = np.asarray(H * (dv[np.newaxis].T / Hs))[:, 0]
        # Optimality criteria
        xold[:] = x
        (x[:], g) = oc(nelx, nely, x, volfrac, dc, dv, g)
        # Filter design variables
        if ft == 0:
            xPhys[:] = x
        elif ft == 1:
            xPhys[:] = np.asarray(H * x[np.newaxis].T / Hs)[:, 0]
        # Compute the change by the inf. norm
        change = np.linalg.norm(x.reshape(nelx * nely, 1) - xold.reshape(nelx * nely, 1), np.inf)
        # Plot to screen
        im.set_array(-xPhys.reshape((nelx, nely)).T)
        fig.canvas.draw()
        # Write iteration history to screen
        print("it.: {0} , obj.: {1:.3f} Vol.: {2:.3f}, ch.: {3:.3f}".format(
            loop, obj, (g + volfrac * nelx * nely) / (nelx * nely), change))
        np.save('results/top88_' + mod_idx + '_xPhys_' + str(nelx) + '_' + str(nely) + '.npy', xPhys.reshape((nelx, nely)).T)
        np.save('results/top88_' + mod_idx + '_u_' + str(nelx) + '_' + str(nely) + '.npy', u)
        plt.savefig('results/top88_' + mod_idx + '_img_' + str(nelx) + '_' + str(nely) + '.jpg')
        # plt.show()
    print(u.reshape(nelx + 1, nely + 1, 2))
    # Save the final design; make sure the plot stays and that the shell remains
    np.save('results/top88_' + mod_idx + '_xPhys_' + str(nelx) + '_' + str(nely) + '.npy', xPhys.reshape((nelx, nely)).T)
    np.save('results/top88_' + mod_idx + '_u_' + str(nelx) + '_' + str(nely) + '.npy', u)
    plt.savefig('results/top88_' + mod_idx + '_img_' + str(nelx) + '_' + str(nely) + '.jpg')
    plt.show()
    print("Press any key...")

# Element stiffness matrix
def lk():
    E = 1
    nu = 0.3
    k = np.array([1/2 - nu/6, 1/8 + nu/8, -1/4 - nu/12, -1/8 + 3*nu/8,
                  -1/4 + nu/12, -1/8 - nu/8, nu/6, 1/8 - 3*nu/8])
    KE = E / (1 - nu**2) * np.array([
        [k[0], k[1], k[2], k[3], k[4], k[5], k[6], k[7]],
        [k[1], k[0], k[7], k[6], k[5], k[4], k[3], k[2]],
        [k[2], k[7], k[0], k[5], k[6], k[3], k[4], k[1]],
        [k[3], k[6], k[5], k[0], k[7], k[2], k[1], k[4]],
        [k[4], k[5], k[6], k[7], k[0], k[1], k[2], k[3]],
        [k[5], k[4], k[3], k[2], k[1], k[0], k[7], k[6]],
        [k[6], k[3], k[4], k[1], k[2], k[7], k[0], k[5]],
        [k[7], k[2], k[1], k[4], k[3], k[6], k[5], k[0]]])
    return KE

# Optimality criterion
def oc(nelx, nely, x, volfrac, dc, dv, g):
    l1 = 0
    l2 = 1e9
    move = 0.2
    # reshape to perform vector operations
    xnew = np.zeros(nelx * nely)
    while (l2 - l1) / (l1 + l2) > 1e-3:
        lmid = 0.5 * (l2 + l1)
        xnew[:] = np.maximum(0.0, np.maximum(x - move, np.minimum(1.0, np.minimum(x + move, x * np.sqrt(-dc / dv / lmid)))))
        gt = g + np.sum(dv * (xnew - x))
        if gt > 0:
            l1 = lmid
        else:
            l2 = lmid
    return (xnew, gt)

# The real main driver
if __name__ == "__main__":
    # Default input parameters
    mod_idx = 'mod4'
    nelx = 30
    nely = 10
    volfrac = 0.4
    rmin = 5.4
    penal = 3.0
    ft = 1  # ft==0 -> sens, ft==1 -> dens
    import sys
    if len(sys.argv) > 1: nelx = int(sys.argv[1])
    if len(sys.argv) > 2: nely = int(sys.argv[2])
    if len(sys.argv) > 3: volfrac = float(sys.argv[3])
    if len(sys.argv) > 4: rmin = float(sys.argv[4])
    if len(sys.argv) > 5: penal = float(sys.argv[5])
    if len(sys.argv) > 6: ft = int(sys.argv[6])
    top88(nelx, nely, volfrac, penal, rmin, ft, mod_idx)
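The driver accepts positional overrides; a sketch of an invocation matching the argument order above (it assumes a results/ directory exists, since the np.save calls write there):

# From the repository root:
#   python utils/topopt_88.py 180 60 0.4 5.4 3.0 1
# which maps to: nelx nely volfrac rmin penal ft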

23
utils/utils.py

@@ -1,23 +0,0 @@
import os

def mkdirs(paths):
    """Create empty directories if they don't exist.

    Parameters:
        paths (str list) -- a list of directory paths
    """
    if isinstance(paths, list) and not isinstance(paths, str):
        for path in paths:
            mkdir(path)
    else:
        mkdir(paths)

def mkdir(path):
    """Create a single empty directory if it doesn't exist.

    Parameters:
        path (str) -- a single directory path
    """
    if not os.path.exists(path):
        os.makedirs(path)
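Both helpers are idempotent; a one-line usage sketch:

from utils.utils import mkdirs
mkdirs(['./checkpoints/ANN_mod1', './results'])  # creates whichever directory is missing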

890
visualization.ipynb

File diff suppressed because one or more lines are too long