Browse Source

暂存

final
mckay 2 weeks ago
parent
commit
87a1654d03
  1. 2
      brep2sdf/config/default_config.py
  2. 4
      brep2sdf/networks/loss.py
  3. 61903
      brep2sdf/nohup.out.5.20
  4. 6
      brep2sdf/train.py

2
brep2sdf/config/default_config.py

@@ -51,7 +51,7 @@ class TrainConfig:
num_workers: int = 4 num_workers: int = 4
num_epochs1: int = 0000 num_epochs1: int = 0000
num_epochs2: int = 000 num_epochs2: int = 000
num_epochs3: int = 100 num_epochs3: int = 3000
learning_rate: float = 0.1 learning_rate: float = 0.1
learning_rate_schedule: List = field(default_factory=lambda: [{ learning_rate_schedule: List = field(default_factory=lambda: [{
"Type": "Step", # 学习率调度类型。"Step"表示在指定迭代次数后将学习率乘以因子 "Type": "Step", # 学习率调度类型。"Step"表示在指定迭代次数后将学习率乘以因子

4
brep2sdf/networks/loss.py

@@ -5,11 +5,11 @@ from brep2sdf.utils.logger import logger
class LossManager: class LossManager:
def __init__(self, ablation, **condition_kwargs): def __init__(self, ablation, **condition_kwargs):
self.weights = { self.weights = {
"manifold": 1, "manifold": 10,
"feature_manifold": 1, # 原文里面和manifold的权重是一样的 "feature_manifold": 1, # 原文里面和manifold的权重是一样的
"normals": 1, "normals": 1,
"eikonal": 1, "eikonal": 1,
"offsurface": 1, "offsurface": 10,
"consistency": 1, "consistency": 1,
"correction": 1, "correction": 1,
"psdf": 1, "psdf": 1,

61903
brep2sdf/nohup.out.5.20

File diff suppressed because it is too large

6
brep2sdf/train.py

@@ -613,7 +613,7 @@ class Trainer:
self.model.train() self.model.train()
total_loss = 0.0 total_loss = 0.0
step = 0 # 如果你的训练是分批次的,这里应该用批次索引 step = 0 # 如果你的训练是分批次的,这里应该用批次索引
batch_size = 50000 # 设置合适的batch大小 batch_size = 25000 # 设置合适的batch大小
# 数据处理 # 数据处理
# manfld # manfld
@@ -627,7 +627,7 @@
_, _mnfld_face_indices_mask, _mnfld_operator = self.root.forward(_mnfld_pnts) _, _mnfld_face_indices_mask, _mnfld_operator = self.root.forward(_mnfld_pnts)
# 生成非流形点 # 生成非流形点
_nonmnfld_pnts, _psdf = self.sampler.get_norm_points(_mnfld_pnts, _normals, 0.01) _nonmnfld_pnts, _psdf = self.sampler.get_norm_points(_mnfld_pnts, _normals, 0.1)
_, _nonmnfld_face_indices_mask, _nonmnfld_operator = self.root.forward(_nonmnfld_pnts) _, _nonmnfld_face_indices_mask, _nonmnfld_operator = self.root.forward(_nonmnfld_pnts)
# 更新缓存 # 更新缓存
@@ -905,7 +905,7 @@
return total_loss # 对于单批次训练,直接返回当前损失 return total_loss # 对于单批次训练,直接返回当前损失
def validate(self, epoch, loss): def validate(self, epoch, loss):
if loss < self.best_loss: if epoch > self.config.train.num_epochs3 / 5 and loss < self.best_loss:
self.best_loss = loss self.best_loss = loss
self._save_checkpoint(-1, loss) # 存 best self._save_checkpoint(-1, loss) # 存 best
logger.info(f'Best Epoch: {epoch}\tAverage Loss: {loss:.6f}') logger.info(f'Best Epoch: {epoch}\tAverage Loss: {loss:.6f}')

Loading…
Cancel
Save