|
|
@@ -43,20 +43,20 @@ def normalize(surfs, edges, corners):
 
     Args:
         surfs: list of face point sets
         edges: list of edge point sets
-        corners: list of vertex coordinates
+        corners: vertex coordinate array [num_edges, 2, 3]
 
     Returns:
         surfs_wcs: face point sets in the original coordinate system
         edges_wcs: edge point sets in the original coordinate system
         surfs_ncs: face point sets in the normalized coordinate system
         edges_ncs: edge point sets in the normalized coordinate system
-        corner_wcs: normalized vertex coordinates
+        corner_wcs: normalized vertex coordinates [num_edges, 2, 3]
    """
    if len(corners) == 0:
        return None, None, None, None, None

    # Compute the bounding box and the scale factor
-    corners_array = np.array(corners)
+    corners_array = corners.reshape(-1, 3)  # [num_edges*2, 3]
    center = (corners_array.max(0) + corners_array.min(0)) / 2  # bounding-box center
    scale = 1.0 / (corners_array.max(0) - corners_array.min(0)).max()  # scale factor
|
@@ -78,8 +78,8 @@ def normalize(surfs, edges, corners):
        edges_wcs.append(edge_wcs)
        edges_ncs.append(edge_ncs)

-    # Normalize the vertex coordinates
-    corner_wcs = (corners_array - center) * scale
+    # Normalize the vertex coordinates, keeping the [num_edges, 2, 3] shape
+    corner_wcs = (corners - center) * scale  # broadcasting preserves the original dimensions

    return (np.array(surfs_wcs, dtype=object),
            np.array(edges_wcs, dtype=object),
|
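As a sanity check on the two hunks above, here is a self-contained sketch (toy values, not from the dataset) of what the reshape, the center/scale computation, and the final broadcast do to a corner array:

```python
import numpy as np

# Toy corner array in the [num_edges, 2, 3] layout: two edges, two endpoints each.
corners = np.array([[[0., 0., 0.], [4., 0., 0.]],
                    [[4., 0., 0.], [4., 2., 0.]]], dtype=np.float32)

flat = corners.reshape(-1, 3)                    # [num_edges * 2, 3]
center = (flat.max(0) + flat.min(0)) / 2         # bounding-box center
scale = 1.0 / (flat.max(0) - flat.min(0)).max()  # longest bbox side maps to length 1

normalized = (corners - center) * scale          # broadcasting keeps the shape
print(normalized.shape)                          # (2, 2, 3)
print(normalized.min(), normalized.max())        # all coordinates within [-0.5, 0.5]
```

Note that the longest bounding-box side is mapped to unit length, so the normalized coordinates land in [-0.5, 0.5] rather than [-1, 1].
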
@@ -194,8 +194,8 @@ def parse_solid(step_path):
        'edge_wcs': np.ndarray(dtype=object)  # shape (N,); each element is a (100, 3) float32 array of sampled edge points
        'surf_ncs': np.ndarray(dtype=object)  # shape (N,); each element is an (M, 3) float32 array of the normalized face point cloud
        'edge_ncs': np.ndarray(dtype=object)  # shape (N,); each element is a (100, 3) float32 array of normalized edge samples
-        'corner_wcs': np.ndarray(dtype=float32)  # shape (K, 3); coordinates of all vertices
-        'corner_unique': np.ndarray(dtype=float32)  # shape (L, 3); deduplicated vertex coordinates
+        'corner_wcs': np.ndarray(dtype=float32)  # shape (num_edges, 2, 3); the two endpoint coordinates of each edge
+        'corner_unique': np.ndarray(dtype=float32)  # shape (num_vertices, 3); unique vertex coordinates, num_vertices <= num_edges * 2

        # Topology
        'edgeFace_adj': np.ndarray(dtype=int32)  # shape (num_edges, num_faces); edge-face adjacency
|
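One subtlety behind the `dtype=object` arrays documented above: `np.array(list_of_arrays, dtype=object)` only yields a 1-D array of arrays when the element shapes differ. If every entry happens to have the same shape (as the (100, 3) edge samples do), NumPy silently builds a multi-dimensional object array instead; `len()` still returns N either way, but anyone indexing these fields directly should be aware of it. Pre-allocating sidesteps the ambiguity; a small sketch:

```python
import numpy as np

# Two faces with the same point count: np.array(..., dtype=object)
# produces shape (2, 100, 3) rather than a (2,) array of arrays.
faces = [np.zeros((100, 3), dtype=np.float32),
         np.zeros((100, 3), dtype=np.float32)]
print(np.array(faces, dtype=object).shape)   # (2, 100, 3)

# Pre-allocating an object array keeps one array per slot regardless of shape:
surf_wcs = np.empty(len(faces), dtype=object)
surf_wcs[:] = faces
print(surf_wcs.shape, surf_wcs[0].shape)     # (2,) (100, 3)
```
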
@@ -282,15 +282,29 @@ def parse_solid(step_path):
    # Get the adjacency information
    edgeFace_adj, faceEdge_adj, edgeCorner_adj = get_adjacency_info(shape)

-    # Convert to numpy arrays, but keep the list form
-    face_pnts = list(face_pnts)  # make sure it is a list
-    edge_pnts = list(edge_pnts)  # make sure it is a list
-    corner_pnts = np.array(corner_pnts, dtype=np.float32)
+    # Convert to numpy arrays
+    face_pnts = list(face_pnts)
+    edge_pnts = list(edge_pnts)
+    corner_pnts = np.array(corner_pnts, dtype=np.float32)  # [num_vertices, 3]
+
+    # Regroup the vertex data into two endpoints per edge
+    corner_pairs = []
+    for edge_idx in range(len(edge_pnts)):
+        v1_idx, v2_idx = edgeCorner_adj[edge_idx]
+        v1_pos = corner_pnts[v1_idx]
+        v2_pos = corner_pnts[v2_idx]
+        # Order the endpoints lexicographically so each pair is stored consistently
+        if tuple(v1_pos) > tuple(v2_pos):
+            v1_pos, v2_pos = v2_pos, v1_pos
+        corner_pairs.append(np.stack([v1_pos, v2_pos]))
+
+    corner_pairs = np.stack(corner_pairs)  # [num_edges, 2, 3]
    surf_bbox_wcs = np.array(surf_bbox_wcs, dtype=np.float32)
    edge_bbox_wcs = np.array(edge_bbox_wcs, dtype=np.float32)

    # Normalize the CAD model
-    surfs_wcs, edges_wcs, surfs_ncs, edges_ncs, corner_wcs = normalize(face_pnts, edge_pnts, corner_pnts)
+    surfs_wcs, edges_wcs, surfs_ncs, edges_ncs, corner_wcs = normalize(
+        face_pnts, edge_pnts, corner_pairs)

    # Create result dictionary
    data = {
|
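A note on the endpoint ordering above: an element-wise test such as `(v1_pos > v2_pos).any()` does not define a total order — it can hold in both directions at once — so the same undirected edge could end up stored with either orientation depending on input order. The lexicographic tuple comparison used in the hunk picks exactly one canonical orientation. A minimal demonstration:

```python
import numpy as np

v1 = np.array([0., 5., 0.], dtype=np.float32)
v2 = np.array([1., 0., 0.], dtype=np.float32)

# "Any component greater" holds in both directions, so a swap based on it
# would depend on which endpoint happened to come first:
print((v1 > v2).any(), (v2 > v1).any())   # True True

# A lexicographic comparison gives one canonical answer:
print(tuple(v1) > tuple(v2))              # False -> keep (v1, v2) as-is
```
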
@@ -298,13 +312,13 @@ def parse_solid(step_path):
        'edge_wcs': edges_wcs,
        'surf_ncs': surfs_ncs,
        'edge_ncs': edges_ncs,
-        'corner_wcs': corner_wcs.astype(np.float32),
+        'corner_wcs': corner_wcs.astype(np.float32),  # [num_edges, 2, 3]
        'edgeFace_adj': edgeFace_adj,
        'edgeCorner_adj': edgeCorner_adj,
        'faceEdge_adj': faceEdge_adj,
        'surf_bbox_wcs': surf_bbox_wcs,
        'edge_bbox_wcs': edge_bbox_wcs,
-        'corner_unique': np.unique(corner_wcs, axis=0).astype(np.float32)
+        'corner_unique': np.unique(corner_wcs.reshape(-1, 3), axis=0).astype(np.float32)  # flatten, then deduplicate
    }

    return data
|
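For intuition on the `corner_unique` change: applied to the raw [num_edges, 2, 3] array, `np.unique(..., axis=0)` would deduplicate whole endpoint *pairs*; flattening first makes it deduplicate individual vertices. A toy check:

```python
import numpy as np

# Two edges sharing one endpoint, in the [num_edges, 2, 3] layout:
corner_wcs = np.array([[[0., 0., 0.], [1., 0., 0.]],
                       [[1., 0., 0.], [1., 1., 0.]]], dtype=np.float32)

unique_vertices = np.unique(corner_wcs.reshape(-1, 3), axis=0)
print(unique_vertices.shape)   # (3, 3): the shared endpoint is counted once
```
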
@@ -316,18 +330,91 @@ def load_step(step_path):
    reader.TransferRoots()
    return [reader.OneShape()]

-def process_single_step(
-    step_path:str,
-    output_path:str=None,
-    timeout:int=300
-) -> dict:
-    """Process single STEP file"""
+def check_data_format(data, step_file):
+    """Check that the data format and dimensions meet the requirements."""
+    try:
+        # Required keys
+        required_keys = [
+            # geometry
+            'surf_wcs', 'edge_wcs', 'surf_ncs', 'edge_ncs',
+            'corner_wcs', 'corner_unique',
+            # topology
+            'edgeFace_adj', 'edgeCorner_adj', 'faceEdge_adj',
+            # bounding boxes
+            'surf_bbox_wcs', 'edge_bbox_wcs'
+        ]
+
+        # Make sure every key is present
+        for key in required_keys:
+            if key not in data:
+                return False, f"Missing required key: {key}"
+
+        # Check the geometry data
+        geometry_arrays = ['surf_wcs', 'edge_wcs', 'surf_ncs', 'edge_ncs']
+        for key in geometry_arrays:
+            if not isinstance(data[key], np.ndarray) or data[key].dtype != object:
+                return False, f"{key} should be a numpy array with dtype=object"
+
+        # Check the corner data
+        if not isinstance(data['corner_wcs'], np.ndarray) or data['corner_wcs'].dtype != np.float32:
+            return False, "corner_wcs should be a numpy array with dtype=float32"
+        if len(data['corner_wcs'].shape) != 3 or data['corner_wcs'].shape[1:] != (2, 3):
+            return False, f"corner_wcs should have shape (num_edges, 2, 3), got {data['corner_wcs'].shape}"
+
+        if not isinstance(data['corner_unique'], np.ndarray) or data['corner_unique'].dtype != np.float32:
+            return False, "corner_unique should be a numpy array with dtype=float32"
+        if len(data['corner_unique'].shape) != 2 or data['corner_unique'].shape[1] != 3:
+            return False, f"corner_unique should have shape (N, 3), got {data['corner_unique'].shape}"
+
+        # Check the topology
+        num_faces = len(data['surf_wcs'])
+        num_edges = len(data['edge_wcs'])
+
+        # Check the adjacency matrices
+        adj_checks = [
+            ('edgeFace_adj', (num_edges, num_faces)),
+            ('faceEdge_adj', (num_faces, num_edges)),
+            ('edgeCorner_adj', (num_edges, 2))
+        ]
+
+        for key, expected_shape in adj_checks:
+            if not isinstance(data[key], np.ndarray) or data[key].dtype != np.int32:
+                return False, f"{key} should be a numpy array with dtype=int32"
+            if data[key].shape != expected_shape:
+                return False, f"{key} shape mismatch: expected {expected_shape}, got {data[key].shape}"
+
+        # Check the bounding-box data
+        bbox_checks = [
+            ('surf_bbox_wcs', (num_faces, 6)),
+            ('edge_bbox_wcs', (num_edges, 6))
+        ]
+
+        for key, expected_shape in bbox_checks:
+            if not isinstance(data[key], np.ndarray) or data[key].dtype != np.float32:
+                return False, f"{key} should be a numpy array with dtype=float32"
+            if data[key].shape != expected_shape:
+                return False, f"{key} shape mismatch: expected {expected_shape}, got {data[key].shape}"
+
+        return True, ""
+
+    except Exception as e:
+        return False, f"Format check failed: {str(e)}"
+
+def process_single_step(step_path:str, output_path:str=None, timeout:int=300) -> dict:
+    """Process a single STEP file."""
    try:
        # Parse the STEP file
        data = parse_solid(step_path)
        if data is None:
-            logger.error("Failed to parse STEP file")
+            logger.error(f"Failed to parse STEP file: {step_path}")
            return None

+        # Check the data format
+        is_valid, msg = check_data_format(data, step_path)
+        if not is_valid:
+            logger.error(f"Data format check failed for {step_path}: {msg}")
+            return None
+
        # Save the results
        if output_path:
            try:
|
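Since `check_data_format` returns an `(ok, message)` tuple instead of raising, it can also be run standalone to audit pickles that were written earlier. A small sketch — the path here is hypothetical:

```python
import pickle

pkl_path = '../test_data/pkl/train/example.pkl'  # hypothetical file
with open(pkl_path, 'rb') as f:
    data = pickle.load(f)

ok, msg = check_data_format(data, pkl_path)
print('OK' if ok else f'Invalid: {msg}')
```
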
@@ -335,14 +422,17 @@ def process_single_step(
                os.makedirs(os.path.dirname(output_path), exist_ok=True)
                with open(output_path, 'wb') as f:
                    pickle.dump(data, f)
-                logger.info("Results saved successfully")
+                logger.info(f"Results saved successfully: {output_path}")
+                return data
            except Exception as e:
-                logger.error(f'Not saving due to error: {str(e)}')
+                logger.error(f'Failed to save {output_path}: {str(e)}')
+                return None

        return data

    except Exception as e:
-        logger.error(f'Not saving due to error: {str(e)}')
-        return 0
+        logger.error(f'Error processing {step_path}: {str(e)}')
+        return None

def test(step_file_path, output_path=None):
    """
|
@@ -354,27 +444,37 @@ def test(step_file_path, output_path=None):
        # Parse the STEP file
        data = parse_solid(step_file_path)
        if data is None:
-            logger.error("Failed to parse STEP file")
+            logger.error(f"Failed to parse STEP file: {step_file_path}")
            return None

+        # Check the data format
+        is_valid, msg = check_data_format(data, step_file_path)
+        if not is_valid:
+            logger.error(f"Data format check failed for {step_file_path}: {msg}")
+            return None
+
        # Print the statistics
        logger.info("\nStatistics:")
        logger.info(f"Number of surfaces: {len(data['surf_wcs'])}")
        logger.info(f"Number of edges: {len(data['edge_wcs'])}")
-        logger.info(f"Number of corners: {len(data['corner_unique'])}")
+        logger.info(f"Number of corner pairs: {len(data['corner_wcs'])}")  # corner_wcs is [num_edges, 2, 3]

        # Save the results
        if output_path:
-            logger.info(f"Saving results to: {output_path}")
-            os.makedirs(os.path.dirname(output_path), exist_ok=True)
-            with open(output_path, 'wb') as f:
-                pickle.dump(data, f)
-            logger.info("Results saved successfully")
+            try:
+                logger.info(f"Saving results to: {output_path}")
+                os.makedirs(os.path.dirname(output_path), exist_ok=True)
+                with open(output_path, 'wb') as f:
+                    pickle.dump(data, f)
+                logger.info(f"Results saved successfully: {output_path}")
+            except Exception as e:
+                logger.error(f"Failed to save {output_path}: {str(e)}")
+                return None

        return data

    except Exception as e:
-        logger.error(f"Error processing STEP file: {str(e)}")
+        logger.error(f"Error processing {step_file_path}: {str(e)}")
        return None


def process_furniture_step(data_path):
|
@@ -407,13 +507,26 @@ def process_furniture_step(data_path):


def main():
-    """
-    Main function: process multiple STEP files
-    """
+    """Main function: process multiple STEP files."""
    # Path constants
    INPUT = '/mnt/disk2/dataset/furniture/step/furniture_dataset_step/'
    OUTPUT = '../test_data/pkl/'
-    RESULT = '../test_data/result/'  # records of successful/failed files
+    RESULT = '../test_data/result/pkl/'  # records of successful/failed files
+
+    # Helper that empties an output directory
+    def clean_directory(directory):
+        if os.path.exists(directory):
+            logger.info(f"Cleaning directory: {directory}")
+            for root, dirs, files in os.walk(directory, topdown=False):
+                for name in files:
+                    os.remove(os.path.join(root, name))
+                for name in dirs:
+                    os.rmdir(os.path.join(root, name))
+            logger.info(f"Directory cleaned: {directory}")
+
+    # Remove outputs from previous runs
+    clean_directory(OUTPUT)
+    clean_directory(RESULT)

    # Make sure the output directory exists
    os.makedirs(OUTPUT, exist_ok=True)
|
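The `os.walk(topdown=False)` loop above empties the tree bottom-up while leaving the top-level directory in place. If preserving the directory itself does not matter, the standard library offers a shorter equivalent; a sketch of that alternative (not what the patch does):

```python
import os
import shutil

def clean_directory(directory: str) -> None:
    # Remove the whole tree if present, then recreate it empty.
    if os.path.exists(directory):
        shutil.rmtree(directory)
    os.makedirs(directory, exist_ok=True)
```
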
@@ -424,6 +537,9 @@ def main():
    total_processed = 0
    total_success = 0

+    # Record the start time
+    start_time = datetime.now()
+
    # Process the files per dataset split
    for split in ['train', 'val', 'test']:
        current_step_dirs = step_dirs_dict[split]
|
@@ -431,14 +547,12 @@ def main():
            logger.warning(f"No files found in {split} split")
            continue

-        # Make sure the split directories exist
+        # Make sure the output directory exists
        split_output_dir = os.path.join(OUTPUT, split)
-        split_result_dir = os.path.join(RESULT, split)
        os.makedirs(split_output_dir, exist_ok=True)
-        os.makedirs(split_result_dir, exist_ok=True)

-        success_files = []  # names of successfully processed files
-        failed_files = []  # failed file names and the reason
+        success_files = []  # base file names only (no extension)
+        failed_files = []  # base file names only (no extension)

        # Process the files in parallel
        with ProcessPoolExecutor(max_workers=os.cpu_count() // 2) as executor:
|
@@ -452,39 +566,48 @@ def main():
            # Collect the results
            for future in tqdm(as_completed(futures), total=len(current_step_dirs),
                               desc=f"Processing {split} set"):
+                step_file = futures[future]
+                base_name = step_file.replace('.step', '')  # file name without the extension
                try:
                    status = future.result(timeout=300)
                    if status is not None:
-                        success_files.append(futures[future])
+                        success_files.append(base_name)
                        total_success += 1
-                except TimeoutError:
-                    logger.error(f"Timeout occurred while processing {futures[future]}")
-                    failed_files.append((futures[future], "Timeout"))
-                except Exception as e:
-                    logger.error(f"Error processing {futures[future]}: {str(e)}")
-                    failed_files.append((futures[future], str(e)))
+                    else:
+                        failed_files.append(base_name)
+                except Exception:  # also covers TimeoutError from future.result()
+                    failed_files.append(base_name)
+                finally:
+                    total_processed += 1

-        # Save the list of successful files
-        success_file_path = os.path.join(split_result_dir, 'success.txt')
-        with open(success_file_path, 'w', encoding='utf-8') as f:
+        # Save the processing results
+        os.makedirs(RESULT, exist_ok=True)
+
+        # Save the list of successful files (base names only)
+        success_path = os.path.join(RESULT, f'{split}_success.txt')
+        with open(success_path, 'w') as f:
            f.write('\n'.join(success_files))
-        logger.info(f"Saved {len(success_files)} successful files to {success_file_path}")

-        # Save the list of failed files (with error messages)
-        failed_file_path = os.path.join(split_result_dir, 'failed.txt')
-        with open(failed_file_path, 'w', encoding='utf-8') as f:
-            for file, error in failed_files:
-                f.write(f"{file}: {error}\n")
-        logger.info(f"Saved {len(failed_files)} failed files to {failed_file_path}")
+        # Save the list of failed files (base names only)
+        failed_path = os.path.join(RESULT, f'{split}_failed.txt')
+        with open(failed_path, 'w') as f:
+            f.write('\n'.join(failed_files))
+
+        logger.info(f"{split} set - Success: {len(success_files)}, Failed: {len(failed_files)}")

    # Print the final statistics
+    end_time = datetime.now()
+    duration = end_time - start_time

    if total_processed > 0:
        success_rate = (total_success / total_processed) * 100
-        logger.info(f"Processing completed:")
+        logger.info("\nProcessing Summary:")
+        logger.info(f"Start time: {start_time}")
+        logger.info(f"End time: {end_time}")
+        logger.info(f"Duration: {duration}")
        logger.info(f"Total files processed: {total_processed}")
        logger.info(f"Successfully processed: {total_success}")
        logger.info(f"Failed: {total_processed - total_success}")
        logger.info(f"Success rate: {success_rate:.2f}%")
    else:
        logger.warning("No files were processed")
|
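Two behavioral notes on the loop above. First, futures yielded by `as_completed` are already finished, so `future.result(timeout=300)` never actually waits — and, at least in the hunks shown, the `timeout` parameter of `process_single_step` is not used inside the function either, so no 300-second budget is really enforced. Second, the bare `except Exception` drops the error message, so a failure is only diagnosable by re-running the file. If a real wall-clock budget is wanted, it can be placed on the iterator itself; a self-contained sketch with illustrative numbers:

```python
import time
from concurrent.futures import ProcessPoolExecutor, as_completed, TimeoutError

def work(seconds: float) -> float:
    time.sleep(seconds)
    return seconds

if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=2) as executor:
        futures = {executor.submit(work, t): t for t in (0.1, 0.2, 5.0)}
        done = 0
        try:
            # The timeout bounds the whole iteration, not a single result() call.
            for future in as_completed(futures, timeout=1.0):
                future.result()
                done += 1
        except TimeoutError:
            print(f"only {done} of {len(futures)} futures finished within the budget")
        # Note: leaving the with-block still waits for the stragglers to finish.
```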