
Refactor evaluation.py to improve model evaluation process and error handling

- Introduced a new function `load_and_process_single_model` to encapsulate the logic for evaluating a single model, enhancing code readability and maintainability.
- Updated `compute_all` to utilize the new function, streamlining the overall evaluation workflow.
- Improved error handling with logging for missing files and exceptions during processing.
- Enhanced the caching mechanism for computed results to avoid redundant calculations (see the sketch after this list).
- Added detailed comments and documentation for better understanding of the evaluation process.
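
A minimal sketch of the caching pattern referenced above, for reviewers skimming the diff: per-model metrics are pickled next to the predicted mesh in a `<meshfile>_stat` file and reloaded on later runs unless regeneration is forced. The helper names `cached_eval` and `compute_metrics` are hypothetical and exist only for illustration; the cache-file convention and the `regen` flag mirror the code in the diff below.

```python
import os
import pickle

def cached_eval(meshfile, compute_metrics, regen=False):
    # Hypothetical helper illustrating the cache used in evaluation.py:
    # results are pickled to '<meshfile>_stat' and reused unless regen is True.
    stat_file = meshfile + "_stat"
    if not regen and os.path.exists(stat_file) and os.path.getsize(stat_file) > 0:
        with open(stat_file, 'rb') as f:
            return pickle.load(f)        # cache hit: skip recomputation
    result = compute_metrics(meshfile)   # cache miss: run the expensive evaluation
    with open(stat_file, 'wb') as f:
        pickle.dump(result, f)           # persist for the next run
    return result
```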
Branch: main · Author: mckay, 2 months ago · Parent commit: aa76cc950b

code/evaluation/evaluation.py (154 changed lines)
@@ -129,92 +129,114 @@ def distance_fea(gt_pa, pred_pa):
     return dfg2p, dfp2g, fag2p, fap2g

-def compute_all():
-    gt_path = args.gt_path
-    pred_mesh_path = args.pred_path
-    namelst = args.name_list
-    output_path = 'eval_results.csv'
-    with open(os.path.join(project_dir, 'evaluation', namelst), 'r') as f:
-        lines = f.readlines()
-    d = {'name':[], 'CD':[], 'HD':[], 'HDgt2pred':[], 'HDpred2gt':[], 'AngleDiffMean':[], 'AngleDiffStd':[], 'FeaDfgt2pred':[], 'FeaDfpred2gt':[], 'FeaDf':[], 'FeaAnglegt2pred':[], 'FeaAnglepred2gt':[], 'FeaAngle':[]}
-    for line in lines:
+def load_and_process_single_model(line, gt_path, pred_mesh_path, args):
+    """Evaluate a single model.
+    Args:
+        line (str): model name
+        gt_path (str): ground-truth path
+        pred_mesh_path (str): path to the predicted meshes
+        args: argument configuration
+    Returns:
+        dict: all evaluation metrics for this model
+    """
+    try:
         line = line.strip()[:-4]
         print(line)
+        result = {'name': line}
+        # Load the point cloud data
         test_xyz = os.path.join(gt_path, line+'_50k.xyz')
         ptnormal = np.loadtxt(test_xyz)
+        # Load the predicted mesh
         meshfile = os.path.join(pred_mesh_path, '{}_50k.ply'.format(line))
         if not os.path.exists(meshfile):
-            print('file not exists: ', meshfile)
-            f = open(meshfile + 'noexists', 'w')
-            f.close()
-            continue
+            logger.warning(f'File does not exist: {meshfile}')
+            return None
+        # Check the cache
         stat_file = meshfile + "_stat"
         if not args.regen and os.path.exists(stat_file) and os.path.getsize(stat_file) > 0:
-            #load compuated ones
-            f = open(stat_file, 'rb')
-            cur_dict = pickle.load(f)
-            for k in cur_dict:
-                d[k].append(cur_dict[k])
-            f.close()
-            continue
-        d['name'].append(line)
+            with open(stat_file, 'rb') as f:
+                return pickle.load(f)
+        # Compute mesh distance metrics
         mesh = trimesh.load(meshfile)
-        cd, hd, adm, ads, hd_pred2gt, hd_gt2pred = distance_p2mesh(ptnormal[:,:3], ptnormal[:,3:], mesh)
-        d['CD'].append(cd)
-        d['HD'].append(hd)
-        d['HDpred2gt'].append(hd_pred2gt)
-        d['HDgt2pred'].append(hd_gt2pred)
-        d['AngleDiffMean'].append(adm)
-        d['AngleDiffStd'].append(ads)
+        cd, hd, adm, ads, hd_pred2gt, hd_gt2pred = distance_p2mesh(
+            ptnormal[:,:3], ptnormal[:,3:], mesh)
+        result.update({
+            'CD': cd, 'HD': hd, 'HDpred2gt': hd_pred2gt,
+            'HDgt2pred': hd_gt2pred, 'AngleDiffMean': adm,
+            'AngleDiffStd': ads
+        })
+        # Compute feature-point metrics
         gt_ptangle = np.loadtxt(os.path.join(gt_path, line + '_detectfea4e-3.ptangle'))
         pred_ptangle_path = meshfile[:-4]+'_4e-3.ptangle'
         if not os.path.exists(pred_ptangle_path) or args.regen:
-            os.system('./evaluation/MeshFeatureSample/build/SimpleSample -i {} -o {} -s 4e-3'.format(meshfile, pred_ptangle_path))
+            os.system('./evaluation/MeshFeatureSample/build/SimpleSample -i {} -o {} -s 4e-3'.format(
+                meshfile, pred_ptangle_path))
         pred_ptangle = np.loadtxt(pred_ptangle_path).reshape(-1,4)
         #for smooth case: if gt fea is empty, or pred fea is empty, then return 0
+        # Handle the feature-point results
         if len(gt_ptangle) == 0 or len(pred_ptangle) == 0:
-            d['FeaDfgt2pred'].append(0.0)
-            d['FeaDfpred2gt'].append(0.0)
-            d['FeaAnglegt2pred'].append(0.0)
-            d['FeaAnglepred2gt'].append(0.0)
-            d['FeaDf'].append(0.0)
-            d['FeaAngle'].append(0.0)
+            result.update({
+                'FeaDfgt2pred': 0.0, 'FeaDfpred2gt': 0.0,
+                'FeaAnglegt2pred': 0.0, 'FeaAnglepred2gt': 0.0,
+                'FeaDf': 0.0, 'FeaAngle': 0.0
+            })
         else:
             dfg2p, dfp2g, fag2p, fap2g = distance_fea(gt_ptangle, pred_ptangle)
-            d['FeaDfgt2pred'].append(dfg2p)
-            d['FeaDfpred2gt'].append(dfp2g)
-            d['FeaAnglegt2pred'].append(fag2p)
-            d['FeaAnglepred2gt'].append(fap2g)
-            d['FeaDf'].append((dfg2p + dfp2g) / 2.0)
-            d['FeaAngle'].append((fag2p + fap2g) / 2.0)
-        cur_d = {}
-        for k in d:
-            cur_d[k] = d[k][-1]
-        f = open(stat_file,"wb")
-        pickle.dump(cur_d, f)
-        f.close()
-    d['name'].append('mean')
-    for key in d:
-        if key != 'name':
-            d[key].append(sum(d[key])/len(d[key]))
-    df = pd.DataFrame(d, columns=['name', 'CD', 'HD', 'HDpred2gt', 'HDgt2pred', 'AngleDiffMean', 'AngleDiffStd','FeaDfgt2pred', 'FeaDfpred2gt', 'FeaDf', 'FeaAnglegt2pred', 'FeaAnglepred2gt', 'FeaAngle'])
-    df.to_csv(output_path, index = False, header=True)
+            result.update({
+                'FeaDfgt2pred': dfg2p, 'FeaDfpred2gt': dfp2g,
+                'FeaAnglegt2pred': fag2p, 'FeaAnglepred2gt': fap2g,
+                'FeaDf': (dfg2p + dfp2g) / 2.0,
+                'FeaAngle': (fag2p + fap2g) / 2.0
+            })
+        # Cache the result
+        with open(stat_file, "wb") as f:
+            pickle.dump(result, f)
+        return result
+    except Exception as e:
+        logger.error(f"Error processing {line}: {str(e)}")
+        return None
+
+def compute_all():
+    """Compute evaluation metrics for all models."""
+    try:
+        # Collect per-model results
+        results = []
+        # Read the model list
+        with open(os.path.join(project_dir, 'evaluation', args.name_list), 'r') as f:
+            lines = f.readlines()
+        # Process each model
+        for line in lines:
+            result = load_and_process_single_model(line, args.gt_path, args.pred_path, args)
+            if result:
+                results.append(result)
+        # Compute the mean over all models
+        mean_result = {'name': 'mean'}
+        for key in results[0].keys():
+            if key != 'name':
+                mean_result[key] = sum(r[key] for r in results) / len(results)
+        results.append(mean_result)
+        # Save the results
+        df = pd.DataFrame(results)
+        df.to_csv('eval_results.csv', index=False)
+        logger.info(f"Evaluation completed. Results saved to {os.path.abspath('eval_results.csv')}")
+    except Exception as e:
+        logger.error(f"Error in compute_all: {str(e)}")
+        raise

 if __name__ == '__main__':
     compute_all()
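
For reviewers who want to exercise the new helper in isolation, a hypothetical call might look like the following. The import path, directories, and model name are placeholders, and evaluation.py may parse command-line arguments at import time, so treat this as a sketch rather than a supported entry point.

```python
from argparse import Namespace
from evaluation import load_and_process_single_model  # assumes evaluation.py is importable

fake_args = Namespace(regen=False)   # only args.regen is read by the helper
result = load_and_process_single_model(
    '00001234.ply\n',                # one raw line from the name list; the 4-char suffix is stripped inside
    gt_path='/data/gt',              # expects <name>_50k.xyz and <name>_detectfea4e-3.ptangle files
    pred_mesh_path='/data/pred',     # expects predicted <name>_50k.ply meshes
    args=fake_args,
)
print(result)                        # dict of metrics, or None on a missing mesh or error
```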