2 changed files with 1675 additions and 0 deletions

File diff suppressed because one or more lines are too long
@@ -0,0 +1,127 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "aef142c9-a3d0-408c-a87f-695167d49dea",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "(61, 181, 2)\n",
      "(60, 180)\n",
      "(432, 25)\n",
      "(432, 4, 2)\n",
      "(432, 72)\n"
     ]
    },
    {
     "ename": "RuntimeError",
     "evalue": "Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU.",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mRuntimeError\u001b[0m                              Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[1], line 30\u001b[0m\n\u001b[1;32m     27\u001b[0m loss_function \u001b[38;5;241m=\u001b[39m nn\u001b[38;5;241m.\u001b[39mMSELoss()\n\u001b[1;32m     29\u001b[0m \u001b[38;5;66;03m# Predict \u001b[39;00m\n\u001b[0;32m---> 30\u001b[0m pred \u001b[38;5;241m=\u001b[39m \u001b[43mtest\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mcheckpoints/ANN_\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[43m \u001b[49m\u001b[43mpretrained_mod\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43m_opt.pt\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mX\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     32\u001b[0m \u001b[38;5;66;03m# Calculate loss\u001b[39;00m\n\u001b[1;32m     33\u001b[0m pred_loss\u001b[38;5;241m=\u001b[39m[]\n",
      "File \u001b[0;32m/workspace/EMsFEA-net/test.py:13\u001b[0m, in \u001b[0;36mtest\u001b[0;34m(model_load_path, X, standard, device)\u001b[0m\n\u001b[1;32m     12\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mtest\u001b[39m(model_load_path, X, standard \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mFalse\u001b[39;00m, device \u001b[38;5;241m=\u001b[39m \u001b[38;5;241m0\u001b[39m):\n\u001b[0;32m---> 13\u001b[0m     model \u001b[38;5;241m=\u001b[39m \u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel_load_path\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     15\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m standard:\n\u001b[1;32m     16\u001b[0m         X \u001b[38;5;241m=\u001b[39m standardization(X)\n",
      "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/torch/serialization.py:1014\u001b[0m, in \u001b[0;36mload\u001b[0;34m(f, map_location, pickle_module, weights_only, mmap, **pickle_load_args)\u001b[0m\n\u001b[1;32m   1012\u001b[0m             \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m   1013\u001b[0m                 \u001b[38;5;28;01mraise\u001b[39;00m pickle\u001b[38;5;241m.\u001b[39mUnpicklingError(UNSAFE_MESSAGE \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mstr\u001b[39m(e)) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[0;32m-> 1014\u001b[0m         \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m_load\u001b[49m\u001b[43m(\u001b[49m\u001b[43mopened_zipfile\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1015\u001b[0m \u001b[43m                     \u001b[49m\u001b[43mmap_location\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1016\u001b[0m \u001b[43m                     \u001b[49m\u001b[43mpickle_module\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1017\u001b[0m \u001b[43m                     \u001b[49m\u001b[43moverall_storage\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moverall_storage\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m   1018\u001b[0m \u001b[43m                     \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mpickle_load_args\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1019\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m mmap:\n\u001b[1;32m   1020\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmmap can only be used with files saved with \u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m   1021\u001b[0m                        \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m`torch.save(_use_new_zipfile_serialization=True), \u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m   1022\u001b[0m                        \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mplease torch.save your checkpoint with this option in order to use mmap.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
      "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/torch/serialization.py:1422\u001b[0m, in \u001b[0;36m_load\u001b[0;34m(zip_file, map_location, pickle_module, pickle_file, overall_storage, **pickle_load_args)\u001b[0m\n\u001b[1;32m   1420\u001b[0m unpickler \u001b[38;5;241m=\u001b[39m UnpicklerWrapper(data_file, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mpickle_load_args)\n\u001b[1;32m   1421\u001b[0m unpickler\u001b[38;5;241m.\u001b[39mpersistent_load \u001b[38;5;241m=\u001b[39m persistent_load\n\u001b[0;32m-> 1422\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43munpickler\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1424\u001b[0m torch\u001b[38;5;241m.\u001b[39m_utils\u001b[38;5;241m.\u001b[39m_validate_loaded_sparse_tensors()\n\u001b[1;32m   1425\u001b[0m torch\u001b[38;5;241m.\u001b[39m_C\u001b[38;5;241m.\u001b[39m_log_api_usage_metadata(\n\u001b[1;32m   1426\u001b[0m     \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtorch.load.metadata\u001b[39m\u001b[38;5;124m\"\u001b[39m, {\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mserialization_id\u001b[39m\u001b[38;5;124m\"\u001b[39m: zip_file\u001b[38;5;241m.\u001b[39mserialization_id()}\n\u001b[1;32m   1427\u001b[0m )\n",
      "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/torch/serialization.py:1392\u001b[0m, in \u001b[0;36m_load.<locals>.persistent_load\u001b[0;34m(saved_id)\u001b[0m\n\u001b[1;32m   1390\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m   1391\u001b[0m     nbytes \u001b[38;5;241m=\u001b[39m numel \u001b[38;5;241m*\u001b[39m torch\u001b[38;5;241m.\u001b[39m_utils\u001b[38;5;241m.\u001b[39m_element_size(dtype)\n\u001b[0;32m-> 1392\u001b[0m     typed_storage \u001b[38;5;241m=\u001b[39m \u001b[43mload_tensor\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdtype\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mnbytes\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mkey\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m_maybe_decode_ascii\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlocation\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1394\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m typed_storage\n",
      "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/torch/serialization.py:1366\u001b[0m, in \u001b[0;36m_load.<locals>.load_tensor\u001b[0;34m(dtype, numel, key, location)\u001b[0m\n\u001b[1;32m   1361\u001b[0m         storage\u001b[38;5;241m.\u001b[39mbyteswap(dtype)\n\u001b[1;32m   1363\u001b[0m \u001b[38;5;66;03m# TODO: Once we decide to break serialization FC, we can\u001b[39;00m\n\u001b[1;32m   1364\u001b[0m \u001b[38;5;66;03m# stop wrapping with TypedStorage\u001b[39;00m\n\u001b[1;32m   1365\u001b[0m typed_storage \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mstorage\u001b[38;5;241m.\u001b[39mTypedStorage(\n\u001b[0;32m-> 1366\u001b[0m     wrap_storage\u001b[38;5;241m=\u001b[39m\u001b[43mrestore_location\u001b[49m\u001b[43m(\u001b[49m\u001b[43mstorage\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlocation\u001b[49m\u001b[43m)\u001b[49m,\n\u001b[1;32m   1367\u001b[0m     dtype\u001b[38;5;241m=\u001b[39mdtype,\n\u001b[1;32m   1368\u001b[0m     _internal\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m   1370\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m typed_storage\u001b[38;5;241m.\u001b[39m_data_ptr() \u001b[38;5;241m!=\u001b[39m \u001b[38;5;241m0\u001b[39m:\n\u001b[1;32m   1371\u001b[0m     loaded_storages[key] \u001b[38;5;241m=\u001b[39m typed_storage\n",
      "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/torch/serialization.py:381\u001b[0m, in \u001b[0;36mdefault_restore_location\u001b[0;34m(storage, location)\u001b[0m\n\u001b[1;32m    379\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mdefault_restore_location\u001b[39m(storage, location):\n\u001b[1;32m    380\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m _, _, fn \u001b[38;5;129;01min\u001b[39;00m _package_registry:\n\u001b[0;32m--> 381\u001b[0m         result \u001b[38;5;241m=\u001b[39m \u001b[43mfn\u001b[49m\u001b[43m(\u001b[49m\u001b[43mstorage\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlocation\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    382\u001b[0m         \u001b[38;5;28;01mif\u001b[39;00m result \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m    383\u001b[0m             \u001b[38;5;28;01mreturn\u001b[39;00m result\n",
      "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/torch/serialization.py:274\u001b[0m, in \u001b[0;36m_cuda_deserialize\u001b[0;34m(obj, location)\u001b[0m\n\u001b[1;32m    272\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m_cuda_deserialize\u001b[39m(obj, location):\n\u001b[1;32m    273\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m location\u001b[38;5;241m.\u001b[39mstartswith(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mcuda\u001b[39m\u001b[38;5;124m'\u001b[39m):\n\u001b[0;32m--> 274\u001b[0m         device \u001b[38;5;241m=\u001b[39m \u001b[43mvalidate_cuda_device\u001b[49m\u001b[43m(\u001b[49m\u001b[43mlocation\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m    275\u001b[0m         \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mgetattr\u001b[39m(obj, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m_torch_load_uninitialized\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28;01mFalse\u001b[39;00m):\n\u001b[1;32m    276\u001b[0m             \u001b[38;5;28;01mwith\u001b[39;00m torch\u001b[38;5;241m.\u001b[39mcuda\u001b[38;5;241m.\u001b[39mdevice(device):\n",
      "File \u001b[0;32m/opt/conda/lib/python3.10/site-packages/torch/serialization.py:258\u001b[0m, in \u001b[0;36mvalidate_cuda_device\u001b[0;34m(location)\u001b[0m\n\u001b[1;32m    255\u001b[0m device \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mcuda\u001b[38;5;241m.\u001b[39m_utils\u001b[38;5;241m.\u001b[39m_get_device_index(location, \u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[1;32m    257\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m torch\u001b[38;5;241m.\u001b[39mcuda\u001b[38;5;241m.\u001b[39mis_available():\n\u001b[0;32m--> 258\u001b[0m     \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mRuntimeError\u001b[39;00m(\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mAttempting to deserialize object on a CUDA \u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m    259\u001b[0m                        \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mdevice but torch.cuda.is_available() is False. \u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m    260\u001b[0m                        \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mIf you are running on a CPU-only machine, \u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m    261\u001b[0m                        \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mplease use torch.load with map_location=torch.device(\u001b[39m\u001b[38;5;130;01m\\'\u001b[39;00m\u001b[38;5;124mcpu\u001b[39m\u001b[38;5;130;01m\\'\u001b[39;00m\u001b[38;5;124m) \u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m    262\u001b[0m                        \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mto map your storages to the CPU.\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m    263\u001b[0m device_count \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mcuda\u001b[38;5;241m.\u001b[39mdevice_count()\n\u001b[1;32m    264\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m device \u001b[38;5;241m>\u001b[39m\u001b[38;5;241m=\u001b[39m device_count:\n",
      "\u001b[0;31mRuntimeError\u001b[0m: Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU."
     ]
    }
   ],
   "source": [
    "from test import test\n",
    "\n",
    "import numpy as np\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "from utils.data_standardizer import standardization\n",
    "from utils.data_loader import data_loader\n",
    "\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# Load datasets\n",
    "# test data select:\n",
    "dataload_mod='mod3' # opt: mod1 mod2 mod3\n",
    "# pretrained model select:\n",
    "pretrained_mod='mod3' # opt: mod1 mod2 mod3\n",
    "\n",
    "dst_path='datasets/top88_'+ dataload_mod + '_xPhys_180_60.npy'\n",
    "U_path='datasets/top88_'+ dataload_mod + '_u_180_60.npy'\n",
    "global_density, global_displace, coarse_density, coarse_displace, fine_displace = data_loader(dst_path, U_path)\n",
    "X = np.hstack((coarse_density[:,:] , coarse_displace[:,:,0] , coarse_displace[:,:,1]))\n",
    "Y = fine_displace[:,:]\n",
    "\n",
    "# Set loss function\n",
    "loss_function = nn.MSELoss()\n",
    "\n",
    "# Predict \n",
    "pred = test('checkpoints/ANN_' + pretrained_mod + '_opt.pt', X)\n",
    "\n",
    "# Calculate loss\n",
    "pred_loss=[]\n",
    "device = f'cuda:{0}' if torch.cuda.is_available() else 'cpu'\n",
    "Y = torch.from_numpy(Y).type(torch.float32).to(device)\n",
    "for i in range(pred.shape[0]):\n",
    "    pred_loss.append(loss_function(pred[i,:],Y[i,:]).item())\n",
    "\n",
    "print('Total loss: '+ str(loss_function(pred,Y).item()))\n",
    "\n",
    "# Plot \n",
    "plt.plot(range(pred.shape[0]),pred_loss)\n",
    "plt.ylabel('Loss')\n",
    "plt.xlabel('Coarse mesh id')\n",
    "plt.title(\"Linear graph\") \n",
    "plt.show()\n",
    "\n",
    "loss_metrix = np.asarray(pred_loss)\n",
    "loss_metrix = loss_metrix.reshape(int(60/5), int(180/5))\n",
    "plt.matshow(loss_metrix)\n",
    "plt.title(\"Show loss value in grid\") \n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a2946a86-77ac-466f-8869-aa91f5e2602c",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "base"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
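
The RuntimeError captured in the first cell's output comes from test.py line 13 (visible in the traceback): the checkpoint is loaded with torch.load(model_load_path) and no map_location, so storages saved from a CUDA device cannot be restored on a CPU-only machine. Below is a minimal CPU-safe sketch, assuming the checkpoint pickles the whole model object as the traceback suggests; the helper name load_model_cpu_safe is hypothetical and not part of the repository.

    import torch

    def load_model_cpu_safe(model_load_path, device=None):
        # Hypothetical helper (not in the repo): remap CUDA-saved storages to an
        # available device so deserialization works without a GPU.
        if device is None:
            device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
        # map_location avoids the RuntimeError shown in the cell output above.
        model = torch.load(model_load_path, map_location=device)
        return model.to(device)

Note that test() already exposes a device argument (see its signature in the traceback) but does not forward it to torch.load; wiring a map_location through as above would let this notebook run unchanged on machines without CUDA.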