
import os
import sys
# Set the project root directory
project_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
sys.path.append(project_dir)
os.chdir(project_dir)
# Import the logging system
from utils.logger import logger
import numpy as np
from scipy.spatial import cKDTree
from scipy.spatial.distance import directed_hausdorff
import trimesh
import pandas as pd
import csv
import math
import pickle
import argparse
# Parse command-line arguments first
parser = argparse.ArgumentParser()
parser.add_argument('--gt_path', type=str,
default=os.path.join(project_dir, '../data/eval_data'),
help='ground truth data path')
parser.add_argument('--pred_path', type=str,
default=os.path.join(project_dir, '../data/output_data'),
help='converted data path')
parser.add_argument('--name_list', type=str, default='broken_bullet_name.txt', help='file listing the models to evaluate; set it to all_names.txt to evaluate the whole dataset')
parser.add_argument('--nsample', type=int, default=50000, help='point batch size')
parser.add_argument('--regen', default=False, action="store_true", help='regenerate feature curves')
args = parser.parse_args()
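# Example invocation (illustrative; the script path below is an assumption,
# adjust it to wherever this file lives in the repository):
#   python evaluation/eval.py --gt_path ../data/eval_data \
#       --pred_path ../data/output_data --name_list all_names.txt --regen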
def distance_p2p(points_src, normals_src, points_tgt, normals_tgt):
''' Computes minimal distances of each point in points_src to points_tgt.
Args:
points_src (numpy array [N, 3]): source points
normals_src (numpy array [N, 3]): source normals
points_tgt (numpy array [M, 3]): target points
normals_tgt (numpy array [M, 3]): target normals
Returns:
dist (numpy array [N]): minimal distances of each point in points_src to points_tgt
normals_dot_product (numpy array [N]): dot product of normals of points_src and points_tgt
'''
kdtree = cKDTree(points_tgt)
dist, idx = kdtree.query(points_src)
if normals_src is not None and normals_tgt is not None:
normals_src = \
normals_src / np.linalg.norm(normals_src, axis=-1, keepdims=True)
normals_tgt = \
normals_tgt / np.linalg.norm(normals_tgt, axis=-1, keepdims=True)
normals_dot_product = (normals_tgt[idx] * normals_src).sum(axis=-1)
# Handle normals that point in the wrong direction gracefully
# (mostly because the method does not enforce a consistent orientation during generation)
normals_dot_product = np.abs(normals_dot_product)
return dist, normals_dot_product
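# Illustrative sketch (not called anywhere in this script): shows the expected
# shapes and outputs of distance_p2p on synthetic data. The point counts and
# random inputs below are arbitrary and chosen purely for demonstration.
def _example_distance_p2p():
    rng = np.random.default_rng(0)
    src = rng.random((1000, 3)).astype(np.float32)   # N source points
    tgt = rng.random((2000, 3)).astype(np.float32)   # M target points
    # Random unit normals, purely for illustration.
    n_src = rng.normal(size=(1000, 3))
    n_src /= np.linalg.norm(n_src, axis=-1, keepdims=True)
    n_tgt = rng.normal(size=(2000, 3))
    n_tgt /= np.linalg.norm(n_tgt, axis=-1, keepdims=True)
    dist, ndot = distance_p2p(src, n_src, tgt, n_tgt)
    # dist: [N] nearest-neighbor distances, ndot: [N] absolute normal dot products.
    logger.info(f'mean NN distance: {dist.mean():.4f}, mean abs normal dot: {ndot.mean():.4f}')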
def distance_feature2mesh(points, mesh):
prox = trimesh.proximity.ProximityQuery(mesh)
signed_distance = prox.signed_distance(points)
return np.abs(signed_distance)
def distance_p2mesh(points_src, normals_src, mesh):
points_tgt, idx = mesh.sample(args.nsample, return_index=True)
points_tgt = points_tgt.astype(np.float32)
normals_tgt = mesh.face_normals[idx]
cd1, nc1 = distance_p2p(points_src, normals_src, points_tgt, normals_tgt) #pred2gt
hd1 = cd1.max()
cd1 = cd1.mean()
nc1 = np.clip(nc1, -1.0, 1.0)
angles1 = np.arccos(nc1) / math.pi * 180.0
angles1_mean = angles1.mean()
angles1_std = np.std(angles1)
cd2, nc2 = distance_p2p(points_tgt, normals_tgt, points_src, normals_src) #gt2pred
hd2 = cd2.max()
cd2 = cd2.mean()
nc2 = np.clip(nc2, -1.0, 1.0)
angles2 = np.arccos(nc2)/ math.pi * 180.0
angles2_mean = angles2.mean()
angles2_std = np.std(angles2)
cd = 0.5 * (cd1 + cd2)
hd = max(hd1, hd2)
angles_mean = 0.5 * (angles1_mean + angles2_mean)
angles_std = 0.5 * (angles1_std + angles2_std)
return cd, hd, angles_mean, angles_std, hd1, hd2
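# Illustrative sanity check (not invoked by the evaluation pipeline): points
# sampled directly from a mesh and evaluated against that same mesh should give
# a Chamfer distance near zero and small angle differences. The unit-box mesh
# below is an arbitrary choice used only for demonstration.
def _example_distance_p2mesh():
    box = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
    pts, fidx = box.sample(10000, return_index=True)
    normals = box.face_normals[fidx]
    cd, hd, adm, ads, hd_p2g, hd_g2p = distance_p2mesh(pts.astype(np.float32), normals, box)
    # cd: symmetric Chamfer distance 0.5 * (mean(pred->gt) + mean(gt->pred))
    # hd: two-sided Hausdorff distance max(hd_p2g, hd_g2p)
    # adm/ads: mean/std of the normal angle differences in degrees
    logger.info(f'CD={cd:.6f}, HD={hd:.6f}, angle mean={adm:.2f} deg')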
def distance_fea(gt_pa, pred_pa):
"""计算特征点之间的距离和角度差异
Args:
gt_pa: 真实特征点和角度 [N, 4]
pred_pa: 预测特征点和角度 [N, 4]
Returns:
dfg2p: 真实到预测的距离
dfp2g: 预测到真实的距离
fag2p: 真实到预测的角度差
fap2g: 预测到真实的角度差
"""
gt_points = gt_pa[:,:3]
pred_points = pred_pa[:,:3]
gt_angle = gt_pa[:,3]
pred_angle = pred_pa[:,3]
dfg2p = 0.0
dfp2g = 0.0
fag2p = 0.0
fap2g = 0.0
pred_kdtree = cKDTree(pred_points)
dist1, idx1 = pred_kdtree.query(gt_points)
dfg2p = dist1.mean()
assert(idx1.shape[0] == gt_points.shape[0])
fag2p = np.abs(gt_angle - pred_angle[idx1])
gt_kdtree = cKDTree(gt_points)
dist2, idx2 = gt_kdtree.query(pred_points)
dfp2g = dist2.mean()
fap2g = np.abs(pred_angle - gt_angle[idx2])
fag2p = fag2p.mean()
fap2g = fap2g.mean()
return dfg2p, dfp2g, fag2p, fap2g
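# Illustrative sketch with made-up data (not used by the pipeline): distance_fea
# expects [*, 4] arrays whose columns 0-2 are feature-point coordinates and whose
# column 3 is the angle value associated with that point.
def _example_distance_fea():
    gt = np.array([[0.0, 0.0, 0.0, 90.0],
                   [1.0, 0.0, 0.0, 90.0]])
    pred = np.array([[0.05, 0.0, 0.0, 85.0],
                     [0.95, 0.0, 0.0, 92.0]])
    dfg2p, dfp2g, fag2p, fap2g = distance_fea(gt, pred)
    # dfg2p/dfp2g: mean nearest-neighbor distances (gt->pred / pred->gt)
    # fag2p/fap2g: mean absolute angle differences at the matched points
    logger.info(f'FeaDf={(dfg2p + dfp2g) / 2.0:.4f}, FeaAngle={(fag2p + fap2g) / 2.0:.2f}')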
def load_and_process_single_model(line, gt_path, pred_mesh_path, args):
"""处理单个模型的评估
Args:
line (str): 模型名称
gt_path (str): 真值路径
pred_mesh_path (str): 预测网格路径
args: 参数配置
Returns:
dict: 包含该模型所有评估指标的字典
"""
try:
line = line.strip()[:-4]
result = {'name': line}
# Load the ground-truth point cloud (points + normals)
test_xyz = os.path.join(gt_path, line+'_50k.xyz')
ptnormal = np.loadtxt(test_xyz)
# Load the predicted mesh
meshfile = os.path.join(pred_mesh_path, '{}_50k.ply'.format(line))
if not os.path.exists(meshfile):
logger.warning(f'File not exists: {meshfile}')
return None
# Check for a cached result
stat_file = meshfile + "_stat"
if not args.regen and os.path.exists(stat_file) and os.path.getsize(stat_file) > 0:
with open(stat_file, 'rb') as f:
return pickle.load(f)
# Compute mesh distance metrics
mesh = trimesh.load(meshfile)
cd, hd, adm, ads, hd_pred2gt, hd_gt2pred = distance_p2mesh(
ptnormal[:,:3], ptnormal[:,3:], mesh)
result.update({
'CD': cd, 'HD': hd, 'HDpred2gt': hd_pred2gt,
'HDgt2pred': hd_gt2pred, 'AngleDiffMean': adm,
'AngleDiffStd': ads
})
# Compute feature-point metrics
gt_ptangle = np.loadtxt(os.path.join(gt_path, line + '_detectfea4e-3.ptangle'))
pred_ptangle_path = meshfile[:-4]+'_4e-3.ptangle'
if not os.path.exists(pred_ptangle_path) or args.regen:
os.system('./evaluation/MeshFeatureSample/build/SimpleSample -i {} -o {} -s 4e-3'.format(
meshfile, pred_ptangle_path))
pred_ptangle = np.loadtxt(pred_ptangle_path).reshape(-1,4)
# Handle the feature-point results
if len(gt_ptangle) == 0 or len(pred_ptangle) == 0:
result.update({
'FeaDfgt2pred': 0.0, 'FeaDfpred2gt': 0.0,
'FeaAnglegt2pred': 0.0, 'FeaAnglepred2gt': 0.0,
'FeaDf': 0.0, 'FeaAngle': 0.0
})
else:
dfg2p, dfp2g, fag2p, fap2g = distance_fea(gt_ptangle, pred_ptangle)
result.update({
'FeaDfgt2pred': dfg2p, 'FeaDfpred2gt': dfp2g,
'FeaAnglegt2pred': fag2p, 'FeaAnglepred2gt': fap2g,
'FeaDf': (dfg2p + dfp2g) / 2.0,
'FeaAngle': (fag2p + fap2g) / 2.0
})
# Cache the result
with open(stat_file, "wb") as f:
pickle.dump(result, f)
return result
except Exception as e:
logger.error(f"Error processing {line}: {str(e)}")
return None
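# Illustrative helper (not used by compute_all): each cached "<mesh>.ply_stat"
# file holds the per-model result dict pickled above. This shows how to inspect
# one; the stat_file argument is whichever cache file you want to look at.
def _load_cached_stat(stat_file):
    with open(stat_file, 'rb') as f:
        result = pickle.load(f)
    logger.info(f"cached metrics for {result.get('name', '?')}: {result}")
    return result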
def compute_all():
"""计算所有模型的评估指标"""
try:
# Collect per-model result dicts
results = []
# Read the list of model names
with open(os.path.join(project_dir, 'evaluation', args.name_list), 'r') as f:
lines = f.readlines()
# Process each model
for line in lines:
result = load_and_process_single_model(line, args.gt_path, args.pred_path, args)
if result:
results.append(result)
# Compute the mean of each metric over all successfully evaluated models
if not results:
logger.warning('No models were evaluated successfully; nothing to aggregate.')
return
mean_result = {'name': 'mean'}
for key in results[0].keys():
if key != 'name':
mean_result[key] = sum(r[key] for r in results) / len(results)
results.append(mean_result)
# Save the results as CSV
df = pd.DataFrame(results)
df.to_csv('eval_results.csv', index=False)
logger.info(f"Evaluation completed. Results saved to {os.path.abspath('eval_results.csv')}")
except Exception as e:
logger.error(f"Error in compute_all: {str(e)}")
raise
if __name__ == '__main__':
compute_all()