Browse Source

feat: Add configurable CSV output filename for evaluation results

- Introduced a new command-line argument `--csv_name` to allow custom CSV filename for evaluation results
- Updated `compute_all()` function to use the dynamically specified CSV filename
- Improved flexibility of evaluation script by enabling users to specify output file name
- Maintained existing logging and error handling mechanisms
NH-Rep
mckay 4 weeks ago
parent
commit
77c4e0dc37
  1. 5
      code/evaluation/evaluation.py

5
code/evaluation/evaluation.py

@@ -30,6 +30,7 @@ parser.add_argument('--pred_path', type=str,
parser.add_argument('--name_list', type=str, default='broken_bullet_name.txt', help='names of models to be evaluated, if you want to evaluate the whole dataset, please set it as all_names.txt')
parser.add_argument('--nsample', type=int, default=50000, help='point batch size')
parser.add_argument('--regen', default = False, action="store_true", help = 'regenerate feature curves')
+parser.add_argument('--csv_name', type=str, default='eval_results.csv', help='csv file name')
args = parser.parse_args()
def distance_p2p(points_src, normals_src, points_tgt, normals_tgt):
@@ -230,9 +231,9 @@ def compute_all():
# 保存结果
df = pd.DataFrame(results)
-df.to_csv('eval_results.csv', index=False)
+df.to_csv(args.csv_name, index=False)
-logger.info(f"Evaluation completed. Results saved to {os.path.abspath('eval_results.csv')}")
+logger.info(f"Evaluation completed. Results saved to {os.path.abspath(args.csv_name)}")
except Exception as e:
logger.error(f"Error in compute_all: {str(e)}")

Loading…
Cancel
Save