rithwiks committed on
Commit a4fa30a
1 Parent(s): aaf6321

data filtering code

utils/.ipynb_checkpoints/sdss_filtering-checkpoint.ipynb ADDED
@@ -0,0 +1,6 @@
+ {
+ "cells": [],
+ "metadata": {},
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
utils/eval_baselines.py CHANGED
@@ -32,6 +32,17 @@ from imagecodecs import (
 jpegxl_encode_max_effort_preset = lambda x: jpegxl_encode(x, lossless=True, effort=9)
 jpegxl_encode_preset = lambda x: jpegxl_encode(x, lossless=True)
 
+
+def split_uint16_to_uint8(arr):
+    # Ensure the input is of the correct type
+    assert arr.dtype == np.uint16, "Input array must be of type np.uint16"
+
+    # Compute the top 8 bits and the bottom 8 bits
+    top_bits = (arr >> 8).astype(np.uint8)
+    bottom_bits = (arr & 0xFF).astype(np.uint8)
+
+    return top_bits, bottom_bits
+
 def find_matching_files():
     """
     Returns list of test set file paths.
@@ -80,10 +91,35 @@ def main(dim):
         with fits.open(path) as hdul:
             if dim == '2d':
                 arr = hdul[0].data[0][2]
+                arrs = [arr]
+            elif dim == '2d-top':
+                arr = hdul[0].data[0][2]
+                arr = split_uint16_to_uint8(arr)[0]
+                arrs = [arr]
+            elif dim == '2d-bottom':
+                arr = hdul[0].data[0][2]
+                arr = split_uint16_to_uint8(arr)[1]
+                arrs = [arr]
             elif dim == '3dt' and len(hdul[0].data) > 2:
                 arr = hdul[0].data[0:3][2]
+                arrs = [arr]
             elif dim == '3dw' and len(hdul[0].data[0]) > 2:
                 arr = hdul[0].data[0][0:3]
+                arrs = [arr]
+            elif dim == '3dt_reshape' and len(hdul[0].data) > 2:
+                arr = hdul[0].data[0:3][2].reshape((800, -1))
+                arrs = [arr]
+            elif dim == '3dw_reshape' and len(hdul[0].data[0]) > 2:
+                arr = hdul[0].data[0][0:3].reshape((800, -1))
+                arrs = [arr]
+            elif dim == 'tw':
+                init_arr = hdul[0].data
+                def arrs_gen():
+                    for i in range(init_arr.shape[-2]):
+                        for j in range(init_arr.shape[-1]):
+                            yield init_arr[:, :, i, j]
+
+                arrs = arrs_gen()
             else:
                 continue
 
@@ -92,26 +128,31 @@ def main(dim):
         print(df.mean())
         df.to_csv(save_path)
 
-        for algo in ALL_CODECS.keys():
-            try:
-                if algo == "JPEG_2K" and dim != '2d':
-                    test_results = benchmark_imagecodecs_compression_algos(arr.transpose(1, 2, 0), algo)
-                else:
-                    test_results = benchmark_imagecodecs_compression_algos(arr, algo)
+        for arr_idx, arr in enumerate(arrs):
+            for algo in ALL_CODECS.keys():
+                try:
+                    if algo == "JPEG_2K" and (dim == '3dt' or dim == '3dw'):
+                        test_results = benchmark_imagecodecs_compression_algos(arr.transpose(1, 2, 0), algo)
+                    else:
+                        test_results = benchmark_imagecodecs_compression_algos(arr, algo)
 
-                for column, value in test_results.items():
-                    if column in df.columns:
-                        df.at[path, column] = value
-
-            except Exception as e:
-                print(f"Failed at {path} under exception {e}.")
-
+                    for column, value in test_results.items():
+                        if column in df.columns:
+                            df.at[path + f"_arr_{arr_idx}", column] = value
+
+                except Exception as e:
+                    print(f"Failed at {path} under exception {e}.")
+
 
+    print(df.mean())
+    df.to_csv(save_path)
+
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Process some 2D or 3D data.")
     parser.add_argument(
         "dimension",
-        choices=['2d', '3dt', '3dw'],
+        choices=['2d', '2d-top', '2d-bottom', '3dt', '3dw', 'tw', '3dt_reshape', '3dw_reshape'],
         help="Specify whether the data is 2d, 3dt (3d time dimension), or 3dw (3d wavelength dimension)."
     )
     args = parser.parse_args()
@@ -119,7 +160,13 @@ if __name__ == "__main__":
 
     # RICE REQUIRES UNIQUE INPUT OF ARR SHAPE AND DTYPE INTO DECODER
 
-    if dim == '2d':
+    if dim == '3dw' or dim == '3dt' or dim == 'tw':
+        ALL_CODECS = {
+            "JPEG_XL_MAX_EFFORT": [jpegxl_encode_max_effort_preset, jpegxl_decode],
+            "JPEG_XL": [jpegxl_encode_preset, jpegxl_decode],
+            "JPEG_2K": [jpeg2k_encode, jpeg2k_decode],
+        }
+    else:
         ALL_CODECS = {
             "JPEG_XL_MAX_EFFORT": [jpegxl_encode_max_effort_preset, jpegxl_decode],
             "JPEG_XL": [jpegxl_encode_preset, jpegxl_decode],
@@ -127,12 +174,6 @@ if __name__ == "__main__":
             "JPEG_LS": [jpegls_encode, jpegls_decode],
             "RICE": [rcomp_encode, rcomp_decode],
         }
-    else:
-        ALL_CODECS = {
-            "JPEG_XL_MAX_EFFORT": [jpegxl_encode_max_effort_preset, jpegxl_decode],
-            "JPEG_XL": [jpegxl_encode_preset, jpegxl_decode],
-            "JPEG_2K": [jpeg2k_encode, jpeg2k_decode],
-        }
 
     columns = []
     for algo in ALL_CODECS.keys():
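
Editor's note: the new split_uint16_to_uint8 helper is what backs the added 2d-top / 2d-bottom modes above: each uint16 frame is split into its high-byte and low-byte planes so the 8-bit codecs can be benchmarked on each plane separately. A minimal sketch of the round trip, assuming nothing beyond NumPy; the recombine_uint16 helper below is illustrative only and not part of this commit:

import numpy as np

def split_uint16_to_uint8(arr):
    # Top byte and bottom byte of every 16-bit pixel, as two uint8 planes
    assert arr.dtype == np.uint16, "Input array must be of type np.uint16"
    return (arr >> 8).astype(np.uint8), (arr & 0xFF).astype(np.uint8)

def recombine_uint16(top_bits, bottom_bits):
    # Hypothetical inverse, used here only to check that the split is lossless
    return (top_bits.astype(np.uint16) << 8) | bottom_bits.astype(np.uint16)

frame = np.random.randint(0, 2**16, size=(800, 800), dtype=np.uint16)
top, bottom = split_uint16_to_uint8(frame)
assert np.array_equal(recombine_uint16(top, bottom), frame)  # exact reconstruction
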
utils/sdss_filtering.ipynb ADDED
@@ -0,0 +1,614 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "c2058f8d",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "522"
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import os\n",
+ "from tqdm import tqdm\n",
+ "import glob\n",
+ "from astropy.io import fits\n",
+ "import os\n",
+ "from astropy.io import fits\n",
+ "from astropy.wcs import WCS\n",
+ "from spherical_geometry.polygon import SphericalPolygon\n",
+ "import os\n",
+ "from astropy.io import fits\n",
+ "from astropy.wcs import WCS\n",
+ "from spherical_geometry.polygon import SphericalPolygon\n",
+ "from sklearn.cluster import AgglomerativeClustering\n",
+ "import matplotlib.pyplot as plt\n",
+ "import pandas as pd\n",
+ "from astropy.io import fits\n",
+ "import pandas as pd\n",
+ "import matplotlib.pyplot as plt\n",
+ "import numpy as np\n",
+ "\n",
+ "def get_all_fits_files(root_dir):\n",
+ "    # Use glob to recursively find all .fits files\n",
+ "    pattern = os.path.join(root_dir, '**', '*.fits')\n",
+ "    fits_files = glob.glob(pattern, recursive=True)\n",
+ "    return fits_files\n",
+ "\n",
+ "valid_fits_paths = get_all_fits_files('./data')\n",
+ "len(valid_fits_paths)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "id": "554c2fa7",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " 9%|███▊ | 47/522 [00:28<06:45, 1.17it/s]WARNING: FITSFixedWarning: RADECSYS= 'ICRS ' / International Celestial Reference Sys \n",
+ "the RADECSYS keyword is deprecated, use RADESYSa. [astropy.wcs.wcs]\n",
+ "100%|█████████████████████████████████████████| 522/522 [06:48<00:00, 1.28it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Initialize the list of confirmed FITS paths\n",
+ "confirmed_fits_paths = []\n",
+ "\n",
+ "all_polys = []\n",
+ "\n",
+ "for i in tqdm(range(len(valid_fits_paths))):\n",
+ "    path1 = valid_fits_paths[i]\n",
+ "    try:\n",
+ "        with fits.open(path1) as hdul:\n",
+ "            hdul[0].data = hdul[0].data[0, 0]\n",
+ "            wcs1a = WCS(hdul[0].header)\n",
+ "            shape1a = sorted(tuple(wcs1a.pixel_shape))[:2]\n",
+ "            footprint1a = wcs1a.calc_footprint(axes=shape1a)\n",
+ "            poly1a = SphericalPolygon.from_radec(footprint1a[:, 0], footprint1a[:, 1])\n",
+ "            all_polys.append(poly1a)\n",
+ "    except Exception as e:\n",
+ "        print(e)\n",
+ "        continue"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "id": "c58c3c55",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|██████████████████████████████████████| 522/522 [00:00<00:00, 17983.71it/s]\n"
+ ]
+ }
+ ],
+ "source": [
+ "latitudes = []\n",
+ "longitudes = []\n",
+ "\n",
+ "for poly in tqdm(all_polys):\n",
+ "    pts = list(poly.to_radec())[0]\n",
+ "    ra = pts[0][0]\n",
+ "    dec = pts[1][0]\n",
+ "    \n",
+ "    longitudes.append(ra)\n",
+ "    latitudes.append(dec)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "id": "1c83484e",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Symmetric?\n",
+ "True\n",
+ "(522, 522)\n"
+ ]
+ }
+ ],
+ "source": [
+ "n_points = len(latitudes)\n",
+ "\n",
+ "# Repeat each point n_points times for lat1, lon1\n",
+ "lat1 = np.repeat(latitudes, n_points)\n",
+ "lon1 = np.repeat(longitudes, n_points)\n",
+ "\n",
+ "# Tile the whole array n_points times for lat2, lon2\n",
+ "lat2 = np.tile(latitudes, n_points)\n",
+ "lon2 = np.tile(longitudes, n_points)\n",
+ "\n",
+ "# Calculates angular separation between two spherical coords\n",
+ "# This can be lat/lon or ra/dec\n",
+ "# Taken from astropy\n",
+ "def angular_separation_deg(lon1, lat1, lon2, lat2):\n",
+ "    lon1 = np.deg2rad(lon1)\n",
+ "    lon2 = np.deg2rad(lon2)\n",
+ "    lat1 = np.deg2rad(lat1)\n",
+ "    lat2 = np.deg2rad(lat2)\n",
+ "    \n",
+ "    sdlon = np.sin(lon2 - lon1)\n",
+ "    cdlon = np.cos(lon2 - lon1)\n",
+ "    slat1 = np.sin(lat1)\n",
+ "    slat2 = np.sin(lat2)\n",
+ "    clat1 = np.cos(lat1)\n",
+ "    clat2 = np.cos(lat2)\n",
+ "\n",
+ "    num1 = clat2 * sdlon\n",
+ "    num2 = clat1 * slat2 - slat1 * clat2 * cdlon\n",
+ "    denominator = slat1 * slat2 + clat1 * clat2 * cdlon\n",
+ "\n",
+ "    return np.rad2deg(np.arctan2(np.hypot(num1, num2), denominator))\n",
+ "\n",
+ "# Compute the pairwise angular separations\n",
+ "angular_separations = angular_separation_deg(lon1, lat1, lon2, lat2)\n",
+ "\n",
+ "# Reshape the result into a matrix form\n",
+ "angular_separations_matrix = angular_separations.reshape(n_points, n_points)\n",
+ "\n",
+ "def check_symmetric(a, rtol=1e-05, atol=1e-07):\n",
+ "    return np.allclose(a, a.T, rtol=rtol, atol=atol)\n",
+ "\n",
+ "print(\"Symmetric?\")\n",
+ "print(check_symmetric(angular_separations_matrix))\n",
+ "print(angular_separations_matrix.shape)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "id": "c66e8c1e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "SDSS_FOV = 0.088\n",
+ "\n",
+ "THRESH = SDSS_FOV * 4\n",
+ "\n",
+ "clustering = AgglomerativeClustering(n_clusters=None, metric='precomputed', linkage='single', distance_threshold=THRESH)\n",
+ "labels = clustering.fit_predict(angular_separations_matrix)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "id": "51da93b0",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " 3%|█▍ | 13/377 [00:00<00:21, 16.71it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "FAIL label: 10, i: 1 IoU: 0.25944657797281734\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " 8%|███▏ | 29/377 [00:01<00:15, 21.76it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "FAIL label: 23, i: 2 IoU: 0.4090236242653364\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " 9%|███▉ | 35/377 [00:02<00:15, 22.08it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "FAIL label: 29, i: 2 IoU: 0.9007704140270892\n",
+ "FAIL label: 32, i: 2 IoU: 0.4056510771965438\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "\r",
+ " 10%|████▏ | 38/377 [00:02<00:18, 18.22it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "FAIL label: 36, i: 2 IoU: 0.5762689111619909\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " 15%|██████▏ | 55/377 [00:02<00:10, 31.49it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "FAIL label: 47, i: 1 IoU: 0.9250531562187404\n",
+ "FAIL label: 48, i: 1 IoU: 0.8360586649509192\n",
+ "FAIL label: 53, i: 1 IoU: 0.10008227926664898\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " 19%|████████▏ | 73/377 [00:03<00:09, 33.33it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "FAIL label: 68, i: 2 IoU: 0.49064401487956755\n",
+ "FAIL label: 69, i: 2 IoU: 0.9662692069365345\n",
+ "FAIL label: 71, i: 1 IoU: 0.09857753647298885\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " 22%|█████████▎ | 84/377 [00:03<00:07, 38.05it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "FAIL label: 74, i: 3 IoU: 0.5845239934642943\n",
+ "FAIL label: 81, i: 2 IoU: 0.7402716532101037\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " 28%|███████████▋ | 107/377 [00:03<00:03, 69.94it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "FAIL label: 91, i: 2 IoU: 0.30092583437382314\n",
+ "FAIL label: 106, i: 1 IoU: 0.5437761463648566\n",
+ "FAIL label: 110, i: 1 IoU: 0.978096219321612\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " 43%|█████████████████▎ | 163/377 [00:04<00:01, 124.92it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "FAIL label: 137, i: 1 IoU: 0.5768840711253176\n",
+ "FAIL label: 138, i: 1 IoU: 0.426858068191846\n",
+ "FAIL label: 142, i: 1 IoU: 1.0\n",
+ "FAIL label: 151, i: 1 IoU: 0.6865076393310577\n",
+ "FAIL label: 162, i: 1 IoU: 0.40902362440677925\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "100%|█████████████████████████████████████████| 377/377 [00:04<00:00, 82.43it/s]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "FAIL label: 166, i: 2 IoU: 0.3312197305714273\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "failed_labels = []\n",
+ "failed_paths = []\n",
+ "\n",
+ "for label in tqdm(np.unique(labels)):\n",
+ "    polys = [(all_polys[i], valid_fits_paths[i]) for i in range(len(labels)) if labels[i] == label]\n",
+ "    if len(polys) > 1:\n",
+ "        total_poly = polys[0][0]\n",
+ "        for i in range(1, len(polys)):\n",
+ "            new_poly = polys[i][0]\n",
+ "            new_path = polys[i][1]\n",
+ "            if total_poly.intersects_poly(new_poly):\n",
+ "                union_over_max = total_poly.intersection(new_poly).area() / new_poly.area()\n",
+ "                print(f\"FAIL label: {label}, i: {i} IoU: {union_over_max}\")\n",
+ "                failed_labels.append(label)\n",
+ "                failed_paths.append(new_path)\n",
+ "                continue\n",
+ "            else:\n",
+ "                total_poly = total_poly.union(new_poly)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "id": "46c6217a",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['./data/cube_center_run4203_camcol6_f746_73-5-800-800.fits',\n",
+ " './data/cube_center_run2700_camcol2_f56_86-5-800-800.fits',\n",
+ " './data/cube_center_run4198_camcol6_f243_3-5-800-800.fits',\n",
+ " './data/cube_center_run5658_camcol2_f33_25-5-800-800.fits',\n",
+ " './data/cube_center_run4933_camcol2_f695_81-5-800-800.fits',\n",
+ " './data/cube_center_run2709_camcol5_f236_72-5-800-800.fits',\n",
+ " './data/cube_center_run5637_camcol4_f385_1-5-800-800.fits',\n",
+ " './data/cube_center_run5759_camcol4_f118_72-5-800-800.fits',\n",
+ " './data/cube_center_run2700_camcol5_f163_86-5-800-800.fits',\n",
+ " './data/cube_center_run3434_camcol4_f456_34-5-800-800.fits',\n",
+ " './data/cube_center_run5792_camcol6_f342_73-5-800-800.fits',\n",
+ " './data/cube_center_run5918_camcol5_f278_69-5-800-800.fits',\n",
+ " './data/cube_center_run4128_camcol3_f475_1-5-800-800.fits',\n",
+ " './data/cube_center_run5590_camcol2_f272_56-5-800-800.fits',\n",
+ " './data/cube_center_run5836_camcol6_f545_52-5-800-800.fits',\n",
+ " './data/cube_center_run4933_camcol3_f554_85-5-800-800.fits',\n",
+ " './data/cube_center_run4128_camcol5_f348_8-5-800-800.fits',\n",
+ " './data/cube_center_run2886_camcol1_f164_78-5-800-800.fits',\n",
+ " './data/cube_center_run5642_camcol1_f374_80-5-800-800.fits',\n",
+ " './data/cube_center_run4188_camcol5_f87_42-5-800-800.fits',\n",
+ " './data/cube_center_run5628_camcol5_f238_69-5-800-800.fits',\n",
+ " './data/cube_center_run5781_camcol5_f291_76-5-800-800.fits']"
+ ]
+ },
+ "execution_count": 18,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "failed_paths"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "id": "4af5240f",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "['tiny_train.jsonl', 'full_train.jsonl', 'full_test.jsonl', 'tiny_test.jsonl']"
+ ]
+ },
+ "execution_count": 29,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "os.listdir('./splits')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 42,
+ "id": "abce2e5a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pandas as pd\n",
+ "\n",
+ "# Path to the JSONL file\n",
+ "file_path = './splits/full_train.jsonl'\n",
+ "\n",
+ "# Read the JSONL file into a DataFrame\n",
+ "df_train = pd.read_json(file_path, lines=True)\n",
+ "\n",
+ "# Path to the JSONL file\n",
+ "file_path = './splits/full_test.jsonl'\n",
+ "\n",
+ "# Read the JSONL file into a DataFrame\n",
+ "df_test = pd.read_json(file_path, lines=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 49,
+ "id": "c5844d7a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "df = pd.concat([df_train, df_test])\n",
+ "df = df[~df['image'].isin(failed_paths)]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 54,
+ "id": "b94657a0",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Train and test datasets have been saved to 'train_data.csv' and 'test_data.csv'.\n"
+ ]
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "from sklearn.model_selection import train_test_split\n",
+ "\n",
+ "# Assuming df is your DataFrame\n",
+ "# df = pd.DataFrame(...) # Your DataFrame should already be defined\n",
+ "\n",
+ "# Perform an 85/15 train-test split\n",
+ "train_df, test_df = train_test_split(df, test_size=0.15, random_state=42)\n",
+ "\n",
+ "# Save the train and test DataFrames to CSV files\n",
+ "train_df.to_csv('full_train.csv', index=False)\n",
+ "test_df.to_csv('full_test.csv', index=False)\n",
+ "\n",
+ "print(\"Train and test datasets have been saved to 'train_data.csv' and 'test_data.csv'.\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 55,
+ "id": "987b9fd7",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "2\n",
+ "1\n",
+ "Train and test datasets have been saved to 'train_data.csv' and 'test_data.csv'.\n"
+ ]
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "\n",
+ "# Path to the JSONL file\n",
+ "file_path = './splits/tiny_train.jsonl'\n",
+ "\n",
+ "# Read the JSONL file into a DataFrame\n",
+ "df_train = pd.read_json(file_path, lines=True)\n",
+ "\n",
+ "# Path to the JSONL file\n",
+ "file_path = './splits/tiny_test.jsonl'\n",
+ "\n",
+ "# Read the JSONL file into a DataFrame\n",
+ "df_test = pd.read_json(file_path, lines=True)\n",
+ "\n",
+ "print(len(df_train))\n",
+ "print(len(df_test))\n",
+ "\n",
+ "# Save the train and test DataFrames to CSV files\n",
+ "df_train.to_csv('tiny_train.csv', index=False)\n",
+ "df_test.to_csv('tiny_test.csv', index=False)\n",
+ "\n",
+ "print(\"Train and test datasets have been saved to 'train_data.csv' and 'test_data.csv'.\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 59,
+ "id": "dd0209ef",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "CSV file has been converted and saved as JSONL at ./splits/tiny_train.jsonl\n",
+ "CSV file has been converted and saved as JSONL at ./splits/tiny_test.jsonl\n",
+ "CSV file has been converted and saved as JSONL at ./splits/full_train.jsonl\n",
+ "CSV file has been converted and saved as JSONL at ./splits/full_test.jsonl\n"
+ ]
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "\n",
+ "names = [\"./splits/tiny_train\", \"./splits/tiny_test\", \"./splits/full_train\", \"./splits/full_test\"]\n",
+ "\n",
+ "for name in names:\n",
+ "\n",
+ "    # Step 1: Load the CSV file into a DataFrame\n",
+ "    csv_file_path = f'{name}.csv'  # Replace with your actual CSV file path\n",
+ "    df = pd.read_csv(csv_file_path)\n",
+ "\n",
+ "    # Step 2: Save the DataFrame as a JSONL file\n",
+ "    jsonl_file_path = f'{name}.jsonl'  # Replace with your desired output file path\n",
+ "    df.to_json(jsonl_file_path, orient='records', lines=True)\n",
+ "\n",
+ "    print(f\"CSV file has been converted and saved as JSONL at {jsonl_file_path}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "dfafd26c",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.13"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
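
Editor's note: the filtering notebook deduplicates overlapping SDSS fields in two stages: field centres are first grouped by pairwise angular separation with single-linkage agglomerative clustering (threshold = 4 x the ~0.088 deg field of view), and only within each group are the spherical footprint polygons intersected to flag and drop overlapping frames. A minimal, self-contained sketch of the clustering stage with made-up coordinates (the real notebook derives the centres from WCS footprints in the FITS headers):

import numpy as np
from sklearn.cluster import AgglomerativeClustering

def angular_separation_deg(lon1, lat1, lon2, lat2):
    # Same Vincenty-style formula used in the notebook; inputs and output in degrees
    lon1, lat1, lon2, lat2 = map(np.deg2rad, (lon1, lat1, lon2, lat2))
    sdlon, cdlon = np.sin(lon2 - lon1), np.cos(lon2 - lon1)
    num1 = np.cos(lat2) * sdlon
    num2 = np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * cdlon
    den = np.sin(lat1) * np.sin(lat2) + np.cos(lat1) * np.cos(lat2) * cdlon
    return np.rad2deg(np.arctan2(np.hypot(num1, num2), den))

# Toy field centres (ra, dec) in degrees -- illustrative only
ra = np.array([150.00, 150.05, 10.00, 12.00])
dec = np.array([2.00, 2.02, -5.00, -5.00])

# Pairwise separation matrix, then single-linkage clustering on the precomputed distances
sep = angular_separation_deg(ra[:, None], dec[:, None], ra[None, :], dec[None, :])
thresh = 0.088 * 4  # SDSS_FOV * 4, as in the notebook
clustering = AgglomerativeClustering(n_clusters=None, metric='precomputed',
                                     linkage='single', distance_threshold=thresh)
print(clustering.fit_predict(sep))  # the two nearby fields share a label; the rest are singletons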