2 changed files with 194 additions and 0 deletions
@@ -0,0 +1,194 @@
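"""Collect and log simple statistics over a BRep STEP dataset: face, edge, and
vertex counts plus sampled points per face/edge, reported per train/val/test split."""
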
import os
import pickle
from collections import defaultdict

import numpy as np
from tqdm import tqdm

from brep2sdf.scripts.process_brep import parse_solid
from brep2sdf.utils.logger import logger


def analyze_brep_stats(step_path_list):
    """Analyze statistics of BRep files.

    Args:
        step_path_list: List of STEP file paths.

    Returns:
        stats_dict: Dictionary containing the collected statistics.
    """
    stats = defaultdict(list)
    failed_files = []

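    # Assumed structure of parse_solid()'s return value, inferred from the
    # keys used below rather than from its documentation:
    #   data['surf_wcs']       - per-face arrays of sampled surface points
    #   data['edge_wcs']       - per-edge arrays of sampled curve points
    #   data['corner_unique']  - deduplicated corner vertices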
    for step_path in tqdm(step_path_list, desc="Analyzing BRep files"):
        try:
            # Parse the STEP file
            data = parse_solid(step_path)
            if data is None:
                failed_files.append(os.path.basename(step_path))
                continue

            # Collect per-solid counts
            stats['num_faces'].append(len(data['surf_wcs']))
            stats['num_edges'].append(len(data['edge_wcs']))
            stats['num_vertices'].append(len(data['corner_unique']))

            # Number of sampled points on each face
            face_points = [len(face) for face in data['surf_wcs']]
            stats['points_per_face'].extend(face_points)

            # Number of sampled points on each edge
            edge_points = [len(edge) for edge in data['edge_wcs']]
            stats['points_per_edge'].extend(edge_points)

        except Exception as e:
            logger.error(f"Error while processing {step_path}: {str(e)}")
            failed_files.append(os.path.basename(step_path))
            continue

    # Aggregate the final statistics. summarize() returns None for an empty
    # list (e.g. when every file in a split failed to parse), since numpy
    # aggregations raise on empty input.
    def summarize(values):
        if not values:
            return None
        return {
            'min': np.min(values),
            'max': np.max(values),
            'mean': np.mean(values),
            'median': np.median(values),
            'std': np.std(values)
        }

    stats_dict = {
        'total_files': len(step_path_list),
        'successful_files': len(step_path_list) - len(failed_files),
        'failed_files': failed_files,
        'success_rate': (len(step_path_list) - len(failed_files)) / len(step_path_list) * 100,
        'face_stats': summarize(stats['num_faces']),
        'edge_stats': summarize(stats['num_edges']),
        'vertex_stats': summarize(stats['num_vertices']),
        'face_points_stats': summarize(stats['points_per_face']),
        'edge_points_stats': summarize(stats['points_per_edge'])
    }

    return stats_dict

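# Ad-hoc usage sketch (hypothetical file paths, for illustration only):
#   stats = analyze_brep_stats(['/tmp/chair.step', '/tmp/table.step'])
#   print(stats['success_rate'], stats['face_stats'])
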
def print_stats(stats_dict):
    """Log the collected statistics."""
    logger.info("\n=== BRep file statistics ===")

    # Basic information
    logger.info(f"\nTotal files processed: {stats_dict['total_files']}")
    logger.info(f"Successfully processed: {stats_dict['successful_files']}")
    logger.info(f"Failed files: {len(stats_dict['failed_files'])}")
    logger.info(f"Success rate: {stats_dict['success_rate']:.2f}%")

    # Geometric element statistics
    for name, stats in [
        ('Face', stats_dict['face_stats']),
        ('Edge', stats_dict['edge_stats']),
        ('Vertex', stats_dict['vertex_stats'])
    ]:
        if stats is None:
            logger.info(f"\n{name} statistics: no data")
            continue
        logger.info(f"\n{name} statistics:")
        logger.info(f"  Min: {stats['min']}")
        logger.info(f"  Max: {stats['max']}")
        logger.info(f"  Mean: {stats['mean']:.2f}")
        logger.info(f"  Median: {stats['median']}")
        logger.info(f"  Std: {stats['std']:.2f}")

    # Point-cloud statistics
    for name, stats in [
        ('Face point cloud', stats_dict['face_points_stats']),
        ('Edge point cloud', stats_dict['edge_points_stats'])
    ]:
        if stats is None:
            logger.info(f"\n{name} statistics: no data")
            continue
        logger.info(f"\n{name} statistics:")
        logger.info(f"  Min points: {stats['min']}")
        logger.info(f"  Max points: {stats['max']}")
        logger.info(f"  Mean points: {stats['mean']:.2f}")
        logger.info(f"  Median points: {stats['median']}")
        logger.info(f"  Std of points: {stats['std']:.2f}")

    # List of failed files
    if stats_dict['failed_files']:
        logger.info("\nFailed files:")
        for file in stats_dict['failed_files']:
            logger.info(f"  - {file}")

def get_step_files(root_dir):
    """Collect the STEP file paths for each dataset split.

    Only the top level of each split directory is scanned.

    Args:
        root_dir: Root directory of the dataset.

    Returns:
        dict: File path lists keyed by dataset split.
    """
    step_files_dict = {'train': [], 'val': [], 'test': []}

    for split in ['train', 'val', 'test']:
        split_dir = os.path.join(root_dir, split)
        if not os.path.exists(split_dir):
            logger.warning(f"Directory does not exist: {split_dir}")
            continue

        files = [os.path.join(split_dir, f)
                 for f in os.listdir(split_dir)
                 if f.endswith('.step')]
        step_files_dict[split] = files
        logger.info(f"Found {len(files)} STEP files in the {split} split")

    return step_files_dict
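
# As noted in the docstring, only the top level of each split directory is
# scanned. If the dataset ever nests STEP files in subfolders (an assumption,
# not something the current layout requires), a recursive variant could use
# os.walk, e.g.:
#
#   files = [os.path.join(dirpath, f)
#            for dirpath, _, filenames in os.walk(split_dir)
#            for f in filenames if f.endswith('.step')]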
def main():
    """Entry point: analyze every dataset split and save the statistics."""
    # Dataset root directory
    root_dir = '/mnt/mynewdisk/dataset/furniture/step/furniture_dataset_step/'
    if not os.path.exists(root_dir):
        logger.error(f"Dataset root directory does not exist: {root_dir}")
        return

    # Collect the STEP files of every split
    step_files_dict = get_step_files(root_dir)

    # Analyze each split
    for split, files in step_files_dict.items():
        if not files:
            logger.warning(f"No STEP files found in the {split} split")
            continue

        logger.info(f"\n=== {split.upper()} split statistics ===")
        stats = analyze_brep_stats(files)
        print_stats(stats)

        # Save the statistics for later inspection
        save_dir = '/home/wch/brep2sdf/test_data/stats'
        os.makedirs(save_dir, exist_ok=True)
        save_path = os.path.join(save_dir, f'{split}_stats.pkl')
        with open(save_path, 'wb') as f:
            pickle.dump(stats, f)
        logger.info(f"Statistics saved to: {save_path}")
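
# A minimal sketch for reading the saved statistics back, e.g. in a notebook.
# load_stats() is a hypothetical helper, not used elsewhere in this script;
# `save_path` would be one of the pickle files written by main() above.
def load_stats(save_path):
    """Load a statistics dict written by main()."""
    with open(save_path, 'rb') as f:
        return pickle.load(f)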


if __name__ == '__main__':
    main()