diff --git a/brep2sdf/config/default_config.py b/brep2sdf/config/default_config.py
index e0fa122..c2b5f80 100644
--- a/brep2sdf/config/default_config.py
+++ b/brep2sdf/config/default_config.py
@@ -50,14 +50,14 @@ class TrainConfig:
     batch_size: int = 8
     num_workers: int = 4
     num_epochs1: int = 10000
-    num_epochs2: int = 1000
-    num_epochs3: int = 1000
+    num_epochs2: int = 0
+    num_epochs3: int = 0
     learning_rate: float = 0.1
     learning_rate_schedule: List = field(default_factory=lambda: [{
         "Type": "Step",  # LR schedule type. "Step" multiplies the learning rate by Factor every Interval iterations
         "Initial": 0.01,
         "Interval": 2000,
-        "Factor": 0.3
+        "Factor": 0.7
     }])
     min_lr: float = 1e-5
     weight_decay: float = 0.0001
diff --git a/brep2sdf/networks/decoder.py b/brep2sdf/networks/decoder.py
index f48f19a..5397115 100644
--- a/brep2sdf/networks/decoder.py
+++ b/brep2sdf/networks/decoder.py
@@ -41,7 +41,7 @@ class Decoder(nn.Module):
             lin = nn.Linear(dims_sdf[layer], out_dim)
             if geometric_init:
                 if layer == self.sdf_layers - 2:
-                    torch.nn.init.normal_(lin.weight, mean=np.sqrt(np.pi) / np.sqrt(dims_sdf[layer]), std=0.00001)
+                    torch.nn.init.normal_(lin.weight, mean=np.sqrt(np.pi) / np.sqrt(dims_sdf[layer]), std=0.1)
                     torch.nn.init.constant_(lin.bias, -radius_init)
                 else:
                     torch.nn.init.constant_(lin.bias, 0.0)
@@ -59,13 +59,13 @@ class Decoder(nn.Module):
                 nn.Softplus(beta=beta)
             )
             if beta > 0:
-                self.activation = nn.Softplus(beta=beta)
+                self.activation = nn.SiLU()

            # vanilla relu
            else:
                self.activation = nn.ReLU()
        else: #siren
-            self.activation = Sine()
+            self.activation = nn.SiLU()
        self.final_activation = nn.Tanh()

    def forward(self, feature_matrix: torch.Tensor) -> torch.Tensor:
diff --git a/brep2sdf/networks/loss.py b/brep2sdf/networks/loss.py
index 26d96c2..5f07718 100644
--- a/brep2sdf/networks/loss.py
+++ b/brep2sdf/networks/loss.py
@@ -182,6 +182,49 @@ class LossManager:

         return total_loss, loss_details

+    def compute_loss_stage1(self,
+                            mnfld_pnts,
+                            normals,
+                            gt_sdfs,
+                            mnfld_pred,
+                            ):
+        """
+        Compute the stage-1 (manifold-only) loss.
+
+        :param mnfld_pnts, normals, gt_sdfs, mnfld_pred: on-surface sample points, their ground-truth normals and SDF values, and the SDF values predicted at those points
+        :return: the total loss and a dict of the weighted loss terms
+        """
+        # Force float32 to keep dtypes consistent
+        normals = normals.to(torch.float32)
+        mnfld_pred = mnfld_pred.to(torch.float32)
+        gt_sdfs = gt_sdfs.to(torch.float32)
+
+        # Manifold (position) loss
+        manifold_loss = self.position_loss(mnfld_pred, gt_sdfs)
+
+        # Normal loss
+        normals_loss = self.normals_loss(normals, mnfld_pnts, mnfld_pred)
+        # logger.gpu_memory_stats("after normal loss")
+
+
+        # Consistency loss
+        # consistency_loss = self.consistency_loss(mnfld_pnts, mnfld_pred, all_fi)
+
+        # Correction loss
+        # correction_loss = self.correction_loss(mnfld_pnts, mnfld_pred, all_fi)
+
+
+        # Collect the weighted loss terms
+        loss_details = {
+            "manifold": self.weights["manifold"] * manifold_loss,
+            "normals": self.weights["normals"] * normals_loss
+        }
+
+        # Total loss
+        total_loss = sum(loss_details.values())
+
+        return total_loss, loss_details
+
     def compute_loss_volume(self,
                             mnfld_pnts,
                             nonmnfld_pnts,
diff --git a/brep2sdf/networks/network.py b/brep2sdf/networks/network.py
index b921940..bfbe240 100644
--- a/brep2sdf/networks/network.py
+++ b/brep2sdf/networks/network.py
@@ -58,8 +58,8 @@ class Net(nn.Module):
                  volume_bboxs,
                  feature_dim=8,
                  decoder_output_dim=1,
-                 decoder_hidden_dim=256,
-                 decoder_num_layers=4,
+                 decoder_hidden_dim=512,
+                 decoder_num_layers=6,
                  decoder_activation='relu',
                  decoder_skip_connections=True):

@@ -77,7 +77,7 @@ class Net(nn.Module):
         self.decoder = Decoder(
             d_in=feature_dim,
             dims_sdf=[decoder_hidden_dim] * decoder_num_layers,
-            geometric_init=True,
+            geometric_init=False,
             beta=5
         )
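For context, a minimal sketch of how the new LossManager.compute_loss_stage1 might be wired into a training step. The names train_step_stage1, model, loss_manager, and optimizer are assumptions for illustration and are not part of this diff; it also assumes the network's forward returns the SDF prediction at the sampled points directly. The normal term differentiates the prediction with respect to the input points, so the manifold points must carry gradients.

import torch

def train_step_stage1(model, loss_manager, optimizer,
                      mnfld_pnts: torch.Tensor,
                      normals: torch.Tensor,
                      gt_sdfs: torch.Tensor):
    # The normal loss needs d(pred)/d(points), so the surface samples must require grad.
    mnfld_pnts = mnfld_pnts.clone().requires_grad_(True)

    # Predicted SDF values at the on-surface samples (assumed forward signature).
    mnfld_pred = model(mnfld_pnts)

    total_loss, loss_details = loss_manager.compute_loss_stage1(
        mnfld_pnts, normals, gt_sdfs, mnfld_pred
    )

    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()

    # Return scalars for logging; loss_details holds the already-weighted terms.
    return total_loss.item(), {k: v.item() for k, v in loss_details.items()}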