This project is a Python re-implementation of "Problem-independent machine learning (PIML)-based topology optimization—A universal approach".
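A hypothetical invocation with the parameter values hinted at in the commented-out defaults of the main block below; the script name and flag spellings are assumptions, so check the repository entry point and options/topopt_options.py for the actual names:

    python top_EMsFEA.py --mod_idx test1 --ms_ratio_to 5 --nelx_to 180 --nely_to 60 --volfrac 0.4 --rmin 5.4 --penal 3.0 --ft 1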
# from __future__ import division
import numpy as np
import os
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import spsolve
from matplotlib import colors
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.data_standardizer import standardization
from utils.mesh_reshape import Ms_u_reshape
from options.topopt_options import TopoptOption


def top_EMsFEA(opt):
    mod_idx=opt.mod_idx
    m=opt.ms_ratio_to
    nelx=opt.nelx_to
    nely=opt.nely_to
    volfrac=opt.volfrac
    rmin=opt.rmin
    penal=opt.penal
    ft=opt.ft # ft==0 -> sens, ft==1 -> dens
    print("Minimum compliance problem with OC")
    print("ndes: " + str(nelx) + " x " + str(nely))
    print("volfrac: " + str(volfrac) + ", rmin: " + str(rmin) + ", penal: " + str(penal))
    print("Filter method: " + ["Sensitivity based","Density based"][ft])
    # Max and min stiffness
    Emin=1e-9
    Emax=1.0
    c_nelx=int(nelx/m)
    c_nely=int(nely/m)
    # dofs:
    ndof = 2*(nelx+1)*(nely+1)
    coarse_ndof = 2*(c_nelx+1)*(c_nely+1)
    # Allocate design variables (as array), initialize and allocate sens.
    x=volfrac * np.ones(nely*nelx,dtype=float)
    xold=x.copy()
    xPhys=x.copy()
    g=0 # must be initialized to use the Nguyen/Paulino OC approach
    dc=np.zeros((nely,nelx), dtype=float)
    # FE: Build the index vectors for the coo matrix format.
    KE=lk()
    edofMat=np.zeros((nelx*nely,8),dtype=int)
    for elx in range(nelx):
        for ely in range(nely):
            el = ely+elx*nely
            n1=(nely+1)*elx+ely
            n2=(nely+1)*(elx+1)+ely
            edofMat[el,:]=np.array([2*n1+2, 2*n1+3, 2*n2+2, 2*n2+3, 2*n2, 2*n2+1, 2*n1, 2*n1+1])
    # Construct the index pointers for the coo format
    iK = np.kron(edofMat,np.ones((8,1))).flatten()
    jK = np.kron(edofMat,np.ones((1,8))).flatten()
    coarse_edofMat=np.zeros((c_nelx*c_nely,8),dtype=int)
    for elx in range(c_nelx):
        for ely in range(c_nely):
            el = ely+elx*c_nely
            n1=(c_nely+1)*elx+ely
            n2=(c_nely+1)*(elx+1)+ely
            coarse_edofMat[el,:]=np.array([2*n1+2, 2*n1+3, 2*n2+2, 2*n2+3, 2*n2, 2*n2+1, 2*n1, 2*n1+1])
    # Construct the index pointers for the coo format
    coarse_iK = np.kron(coarse_edofMat,np.ones((8,1))).flatten()
    coarse_jK = np.kron(coarse_edofMat,np.ones((1,8))).flatten()
    # Filter: Build (and assemble) the index+data vectors for the coo matrix format
    nfilter=int(nelx*nely*((2*(np.ceil(rmin)-1)+1)**2))
    iH = np.zeros(nfilter)
    jH = np.zeros(nfilter)
    sH = np.zeros(nfilter)
    cc=0
    for i in range(nelx):
        for j in range(nely):
            row=i*nely+j
            kk1=int(np.maximum(i-(np.ceil(rmin)-1),0))
            kk2=int(np.minimum(i+np.ceil(rmin),nelx))
            ll1=int(np.maximum(j-(np.ceil(rmin)-1),0))
            ll2=int(np.minimum(j+np.ceil(rmin),nely))
            for k in range(kk1,kk2):
                for l in range(ll1,ll2):
                    col=k*nely+l
                    fac=rmin-np.sqrt(((i-k)*(i-k)+(j-l)*(j-l)))
                    iH[cc]=row
                    jH[cc]=col
                    sH[cc]=np.maximum(0.0,fac)
                    cc=cc+1
    # Finalize assembly and convert to csc format
    H=coo_matrix((sH,(iH,jH)),shape=(nelx*nely,nelx*nely)).tocsc()
    Hs=H.sum(1)
    # BC's and support
    # dofs=np.arange(2*(nelx+1)*(nely+1))
    # fixed=np.union1d(dofs[0:2*(nely+1):2],np.array([2*(nelx+1)*(nely+1)-1]))
    # free=np.setdiff1d(dofs,fixed)
    coarse_dofs=np.arange(2*(c_nelx+1)*(c_nely+1))
    coarse_fixed=np.union1d(coarse_dofs[0:2*(c_nely+1):2],np.array([2*(c_nelx+1)*(c_nely+1)-1]))
    coarse_free=np.setdiff1d(coarse_dofs,coarse_fixed)
    # Solution and RHS vectors
    # f=np.zeros((ndof,1))
    # u=np.zeros((ndof,1))
    c_f=np.zeros((coarse_ndof,1))
    c_u=np.zeros((coarse_ndof,1))
    # Set load
    # f[1,0]=-1
    c_f[1,0]=-1
    # Initialize plot and plot the initial design
    plt.ion() # Ensure that redrawing is possible
    fig,ax = plt.subplots()
    im = ax.imshow(-xPhys.reshape((nelx,nely)).T, cmap='gray',
                   interpolation='none', norm=colors.Normalize(vmin=-1,vmax=0))
    fig.show()
    # Set loop counter and gradient vectors
    loop=0
    change=1
    dv = np.ones(nely*nelx)
    dc = np.ones(nely*nelx)
    ce = np.ones(nely*nelx)
    while change>0.01 and loop<2000:
        loop=loop+1
        # Setup and solve FE problem
        # NOTE: this stride-based down-sampling (one fine density per coarse element)
        # is hard-coded for m=5, since the stride 25 equals m*m.
        coarse_xPhys=xPhys[12::25]
        sK=((KE.flatten()[np.newaxis]).T*(Emin+(coarse_xPhys)**penal*(Emax-Emin))).flatten(order='F')
        K = coo_matrix((sK,(coarse_iK,coarse_jK)),shape=(coarse_ndof,coarse_ndof)).tocsc()
        # Remove constrained dofs from matrix
        K = K[coarse_free,:][:,coarse_free]
        # Solve coarse situation
        c_u[coarse_free,0]=spsolve(K,c_f[coarse_free,0])
        # Predict fine situation
        u=pred_net(c_u,xPhys,opt)
        # u=pred_net(c_u,xPhys,c_nelx,c_nely,m,'checkpoints/ANN_mod1/ANN_mod1_opt.pt')
        # print(f.shape, f)
        # print(K.shape, K)
        # print(f[free,0])
        # print(u.shape, u)
        # Objective and sensitivity
        ce[:] = (np.dot(u[edofMat].reshape(nelx*nely,8),KE) * u[edofMat].reshape(nelx*nely,8)).sum(1)
        obj=( (Emin+xPhys**penal*(Emax-Emin))*ce ).sum()
        dc[:]=(-penal*xPhys**(penal-1)*(Emax-Emin))*ce
        dv[:] = np.ones(nely*nelx)
        # Sensitivity filtering:
        if ft==0:
            dc[:] = np.asarray((H*(x*dc))[np.newaxis].T/Hs)[:,0] / np.maximum(0.001,x)
        elif ft==1:
            dc[:] = np.asarray(H*(dc[np.newaxis].T/Hs))[:,0]
            dv[:] = np.asarray(H*(dv[np.newaxis].T/Hs))[:,0]
        # Optimality criteria
        xold[:]=x
        (x[:],g)=oc(nelx,nely,x,volfrac,dc,dv,g)
        # Filter design variables
        if ft==0:   xPhys[:]=x
        elif ft==1: xPhys[:]=np.asarray(H*x[np.newaxis].T/Hs)[:,0]
        # Compute the change by the inf. norm
        change=np.linalg.norm(x.reshape(nelx*nely,1)-xold.reshape(nelx*nely,1),np.inf)
        # Plot to screen
        im.set_array(-xPhys.reshape((nelx,nely)).T)
        fig.canvas.draw()
        # Write iteration history to screen
        print("it.: {0} , obj.: {1:.3f} Vol.: {2:.3f}, ch.: {3:.3f}".format(
            loop,obj,(g+volfrac*nelx*nely)/(nelx*nely),change))
        np.save('results/EMsNetTop_' + mod_idx + '_xPhys_' + str(nelx) + '_' + str(nely) + '.npy', xPhys.reshape((nelx,nely)).T)
        np.save('results/EMsNetTop_' + mod_idx + '_u_' + str(nelx) + '_' + str(nely) + '.npy', u)
        plt.savefig('results/EMsNetTop_' + mod_idx + '_img_' + str(nelx) + '_' + str(nely) + '.jpg')
        # plt.show()
    print(u.reshape(nelx+1,nely+1,2))
    # Make sure the plot stays and that the shell remains
    np.save('results/EMsNetTop_' + mod_idx + '_xPhys_' + str(nelx) + '_' + str(nely) + '.npy', xPhys.reshape((nelx,nely)).T)
    np.save('results/EMsNetTop_' + mod_idx + '_u_' + str(nelx) + '_' + str(nely) + '.npy', u)
    plt.savefig('results/EMsNetTop_' + mod_idx + '_img_' + str(nelx) + '_' + str(nely) + '.jpg')
    plt.show()
    input("Press any key...")


# Element stiffness matrix
def lk():
    E=1
    nu=0.3
    k=np.array([1/2-nu/6,1/8+nu/8,-1/4-nu/12,-1/8+3*nu/8,-1/4+nu/12,-1/8-nu/8,nu/6,1/8-3*nu/8])
    KE = E/(1-nu**2)*np.array([
        [k[0], k[1], k[2], k[3], k[4], k[5], k[6], k[7]],
        [k[1], k[0], k[7], k[6], k[5], k[4], k[3], k[2]],
        [k[2], k[7], k[0], k[5], k[6], k[3], k[4], k[1]],
        [k[3], k[6], k[5], k[0], k[7], k[2], k[1], k[4]],
        [k[4], k[5], k[6], k[7], k[0], k[1], k[2], k[3]],
        [k[5], k[4], k[3], k[2], k[1], k[0], k[7], k[6]],
        [k[6], k[3], k[4], k[1], k[2], k[7], k[0], k[5]],
        [k[7], k[2], k[1], k[4], k[3], k[6], k[5], k[0]]])
    return (KE)
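# Optional sanity check for lk(), kept as comments so it does not run with the
# driver: the element matrix should be symmetric and should map rigid-body
# translations to zero nodal forces.
#   KE = lk()
#   assert np.allclose(KE, KE.T)
#   assert np.allclose(KE @ np.tile([1.0, 0.0], 4), 0.0)  # x-translation
#   assert np.allclose(KE @ np.tile([0.0, 1.0], 4), 0.0)  # y-translation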


# Optimality criterion
def oc(nelx,nely,x,volfrac,dc,dv,g):
    l1=0
    l2=1e9
    move=0.2
    # reshape to perform vector operations
    xnew=np.zeros(nelx*nely)
    while (l2-l1)/(l1+l2)>1e-3:
        lmid=0.5*(l2+l1)
        xnew[:]= np.maximum(0.0,np.maximum(x-move,np.minimum(1.0,np.minimum(x+move,x*np.sqrt(-dc/dv/lmid)))))
        gt=g+np.sum((dv*(xnew-x)))
        if gt>0:
            l1=lmid
        else:
            l2=lmid
    return (xnew,gt)
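# A minimal standalone sketch of calling oc() on toy data (hypothetical values,
# kept as comments): the loop bisects on the Lagrange multiplier lmid of the
# volume constraint, so with uniform sensitivities and a design already at the
# volume fraction the returned design is (almost) unchanged.
#   x = 0.4*np.ones(4)
#   xnew, gt = oc(2, 2, x, 0.4, -np.ones(4), np.ones(4), 0)
#   # xnew ~ 0.4 everywhere, gt ~ 0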


def pred_net(coarse_u,global_x,opt):
    m=opt.ms_ratio_to
    nelx=opt.nelx_to
    nely=opt.nely_to
    coarse_nelx=int(nelx/m)
    coarse_nely=int(nely/m)
    c_N=coarse_nelx*coarse_nely
    N=(opt.ms_ratio_to+1)**2 * 2  # fine dofs per coarse element: (m+1)^2 nodes x 2
    # Generate coarse mesh density
    global_density=global_x.reshape(nelx,nely) # -> (nelx , nely)
    coarse_density = np.lib.stride_tricks.as_strided(
        global_density,
        shape=(coarse_nelx, coarse_nely, m, m),
        strides=global_density.itemsize * np.array([nely*m, m, nely, 1])
    )
    # Generate coarse mesh displacement
    coarse_displace = np.lib.stride_tricks.as_strided(
        coarse_u,
        shape=(coarse_nelx, coarse_nely, 2, 2, 2),
        strides=coarse_u.itemsize * np.array([(coarse_nely+1)*2, 1*2, (coarse_nely+1)*2, 1*2, 1])
    )
    # Data preprocess: per coarse element, m*m fine densities followed by the 8 corner dofs
    X = np.hstack((coarse_density.reshape(c_N,m*m), coarse_displace.reshape(c_N,8)))
    if opt.is_standard:
        X = standardization(X)
    X=torch.from_numpy(X).type(torch.float32).to(opt.device)
    # predict
    model = torch.load(opt.pretrained_model_path)
    pred=torch.zeros(X.shape[0], N)
    for batch_idx, data_batch in enumerate(X):
        pred_ShapeFunction=model(data_batch)
        # data_batch[m*m:] are the 8 coarse corner displacements (m*m == 25 for the default m=5)
        pred[batch_idx,:]=pred_ShapeFunction.reshape(N,8) @ data_batch[m*m:]
    pred=pred.to('cpu').detach().numpy()
    pred=Ms_u_reshape(pred, coarse_nelx, coarse_nely, m)
    pred=pred.reshape((nelx+1)*(nely+1)*2,1)
    return pred


# The real main driver
if __name__ == "__main__":
    # Load parameters
    opt = TopoptOption().parse()
    # mod_idx='test1'
    # m=5
    # nelx=180
    # nely=60
    # volfrac=0.4
    # rmin=5.4
    # penal=3.0
    # ft=1 # ft==0 -> sens, ft==1 -> dens
    top_EMsFEA(opt)
    # u=np.load('./results/coarse_u.npy')
    # x=np.load('./results/global_x.npy')
    # pred_net(u,x,opt)
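    # The driver stores its outputs under results/ with names built from mod_idx
    # and the mesh size; a hypothetical post-processing snippet for the defaults
    # above (adjust mod_idx and size to your run), kept as comments:
    #   xPhys = np.load('results/EMsNetTop_test1_xPhys_180_60.npy')
    #   u = np.load('results/EMsNetTop_test1_u_180_60.npy')
    #   plt.imshow(-xPhys, cmap='gray'); plt.show()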