
Commit 816ccce

Revert "Normalized Mean Model and PSF metrics (#65)" (#70)
This reverts commit 846f17a.
1 parent: 846f17a · commit: 816ccce

2 files changed (+2, -189 lines)

src/psfmachine/machine.py

Lines changed: 2 additions & 183 deletions
@@ -5,7 +5,6 @@
 import numpy as np
 import pandas as pd
 from scipy import sparse
-from scipy import stats
 import astropy.units as u
 from tqdm import tqdm
 import matplotlib.pyplot as plt
@@ -1057,11 +1056,8 @@ def build_shape_model(
 
         self.psf_w = psf_w
         self.psf_w_err = psf_w_err
-        self.normalized_shape_model = False
 
         # We then build the same design matrix for all pixels with flux
-        # this non-normalized mean model is temporary and used to re-create a better
-        # `source_mask`
         self._get_mean_model()
         # remove background pixels and recreate mean model
         self._update_source_mask_remove_bkg_pixels(
@@ -1140,8 +1136,8 @@ def _update_source_mask_remove_bkg_pixels(self, flux_cut_off=1, frame_index="mea
         # (self.mean_model.max(axis=1) < 1)
         # )
 
-        # create the final normalized mean model!
-        self._get_normalized_mean_model()
+        # Recreate mean model!
+        self._get_mean_model()
 
     def _get_mean_model(self):
         """Convenience function to make the scene model"""
@@ -1163,183 +1159,6 @@ def _get_mean_model(self):
         mean_model.eliminate_zeros()
         self.mean_model = mean_model
 
-    def _get_normalized_mean_model(self, npoints=300, plot=False):
-        """Renormalize shape model to sum 1"""
-
-        # create a high resolution polar grid
-        r = self.source_mask.multiply(self.r).data
-        phi_hd = np.linspace(-np.pi, np.pi, npoints)
-        r_hd = np.linspace(0, r.max(), npoints)
-        phi_hd, r_hd = np.meshgrid(phi_hd, r_hd)
-
-        # high res DM
-        Ap = _make_A_polar(
-            phi_hd.ravel(),
-            r_hd.ravel(),
-            rmin=self.rmin,
-            rmax=self.rmax,
-            cut_r=self.cut_r,
-            n_r_knots=self.n_r_knots,
-            n_phi_knots=self.n_phi_knots,
-        )
-        # evaluate the high res model
-        mean_model_hd = Ap.dot(self.psf_w)
-        mean_model_hd[~np.isfinite(mean_model_hd)] = np.nan
-        mean_model_hd = mean_model_hd.reshape(phi_hd.shape)
-
-        # mask out data points that don't contribute to the PSF
-        mean_model_hd_ma = mean_model_hd.copy()
-        mask = mean_model_hd > -3
-        mean_model_hd_ma[~mask] = -np.inf
-        mask &= ~((r_hd > 14) & (np.gradient(mean_model_hd_ma, axis=0) > 0))
-        mean_model_hd_ma[~mask] = -np.inf
-
-        # double integral using trapezoidal rule
-        integral = np.trapz(
-            np.trapz(10 ** mean_model_hd_ma, r_hd[:, 0], axis=0),
-            phi_hd[0, :],
-            axis=0,
-        )
-        # renormalize weights and build new shape model
-        if not self.normalized_shape_model:
-            self.psf_w *= np.log10(integral)
-            self.normalized_shape_model = True
-        self._get_mean_model()
-
-        if plot:
-            fig, ax = plt.subplots(1, 2, figsize=(9, 5))
-            im = ax[0].scatter(
-                phi_hd.ravel(),
-                r_hd.ravel(),
-                c=mean_model_hd_ma.ravel(),
-                vmin=-3,
-                vmax=-1,
-                s=1,
-                label=r"$\int = $" + f"{integral:.4f}",
-            )
-            im = ax[1].scatter(
-                r_hd.ravel() * np.cos(phi_hd.ravel()),
-                r_hd.ravel() * np.sin(phi_hd.ravel()),
-                c=mean_model_hd_ma.ravel(),
-                vmin=-3,
-                vmax=-1,
-                s=1,
-            )
-            ax[0].legend()
-            fig.colorbar(im, ax=ax, location="bottom")
-            plt.show()
-
-    def get_psf_metrics(self, npoints_per_pixel=10):
-        """
-        Computes three metrics for the PSF model:
-        source_psf_fraction: the amount of PSF in the data. Tells how much of a
-            source is used to estimate the PSF; values are between [0, 1].
-        perturbed_ratio_mean: the ratio between the mean model and perturbed model
-            for each source. Useful to find when the time model affects the
-            mean value of the light curve.
-        perturbed_std: the standard deviation of the perturbed model for each
-            source. Useful to find when the time model introduces variability in the
-            light curve.
-
-        If npoints_per_pixel > 0, it creates high resolution shape models for each
-        source by dividing each pixel into a grid of
-        [npoints_per_pixel x npoints_per_pixel]. This provides a better estimate of
-        `source_psf_fraction`.
-
-        Parameters
-        ----------
-        npoints_per_pixel : int
-            Value by which each pixel axis is split to increase resolution. Default is
-            0 for no subpixel resolution.
-
-        """
-        if npoints_per_pixel > 0:
-            # find from which observation (TPF) a source comes
-            obs_per_pixel = self.source_mask.multiply(self.pix2obs).tocsr()
-            tpf_idx = []
-            for k in range(self.source_mask.shape[0]):
-                pix = obs_per_pixel[k].data
-                mode = stats.mode(pix)[0]
-                if len(mode) > 0:
-                    tpf_idx.append(mode[0])
-                else:
-                    tpf_idx.append(
-                        [x for x, ss in enumerate(self.tpf_meta["sources"]) if k in ss][
-                            0
-                        ]
-                    )
-            tpf_idx = np.array(tpf_idx)
-
-            # get the pix coord for each source, we know how to increase resolution in
-            # the pixel space but not in WCS
-            row = self.source_mask.multiply(self.row).tocsr()
-            col = self.source_mask.multiply(self.column).tocsr()
-            mean_model_hd_sum = []
-            # iterating per source avoids creating a new super large `source_mask`
-            # with high resolution, which a priori is hard
-            for k in range(self.nsources):
-                # find row, col combo for each source
-                row_ = row[k].data
-                col_ = col[k].data
-                colhd, rowhd = [], []
-                # each pixel axis is divided into `npoints_per_pixel` subpixels
-                for c, r in zip(col_, row_):
-                    x = np.linspace(c - 0.5, c + 0.5, npoints_per_pixel + 1)
-                    y = np.linspace(r - 0.5, r + 0.5, npoints_per_pixel + 1)
-                    x, y = np.meshgrid(x, y)
-                    colhd.extend(x[:, :-1].ravel())
-                    rowhd.extend(y[:-1].ravel())
-                colhd = np.array(colhd)
-                rowhd = np.array(rowhd)
-                # convert to ra, dec because the machine shape model works in sky coordinates
-                rahd, dechd = self.tpfs[tpf_idx[k]].wcs.wcs_pix2world(
-                    colhd - self.tpfs[tpf_idx[k]].column,
-                    rowhd - self.tpfs[tpf_idx[k]].row,
-                    0,
-                )
-                drahd = rahd - self.sources["ra"][k]
-                ddechd = dechd - self.sources["dec"][k]
-                drahd = drahd * (u.deg)
-                ddechd = ddechd * (u.deg)
-                rhd = np.hypot(drahd, ddechd).to("arcsec").value
-                phihd = np.arctan2(ddechd, drahd).value
-                # create a high resolution DM
-                Ap = _make_A_polar(
-                    phihd.ravel(),
-                    rhd.ravel(),
-                    rmin=self.rmin,
-                    rmax=self.rmax,
-                    cut_r=self.cut_r,
-                    n_r_knots=self.n_r_knots,
-                    n_phi_knots=self.n_phi_knots,
-                )
-                # evaluate the HD model
-                modelhd = 10 ** Ap.dot(self.psf_w)
-                # compute the model sum for source, how much of the source is in data
-                mean_model_hd_sum.append(
-                    np.trapz(modelhd, dx=1 / npoints_per_pixel ** 2)
-                )
-
-            # get normalized psf fraction metric
-            self.source_psf_fraction = np.array(
-                mean_model_hd_sum
-            )  # / np.nanmax(mean_model_hd_sum)
-        else:
-            self.source_psf_fraction = np.array(self.mean_model.sum(axis=1)).ravel()
-
-        # time model metrics
-        if hasattr(self, "P"):
-            perturbed_lcs = np.vstack(
-                [
-                    np.array(self.perturbed_model(time_index=k).sum(axis=1)).ravel()
-                    for k in range(self.time.shape[0])
-                ]
-            )
-            self.perturbed_ratio_mean = (
-                np.nanmean(perturbed_lcs, axis=0)
-                / np.array(self.mean_model.sum(axis=1)).ravel()
-            )
-            self.perturbed_std = np.nanstd(perturbed_lcs, axis=0)
-
     def plot_shape_model(self, radius=20, frame_index="mean", bin_data=False):
         """
         Diagnostic plot of shape model.

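For context on what this revert removes from machine.py: `_get_normalized_mean_model` evaluated the shape model on a high-resolution polar grid and estimated its total flux with a nested trapezoidal integral before rescaling the spline weights. The snippet below is a standalone, illustrative sketch of that nested integration pattern only; the Gaussian stand-in model, grid size, and the division-based renormalization at the end are assumptions for demonstration and are not psfmachine code.

# Illustrative sketch only (not the psfmachine API).
import numpy as np

# np.trapezoid is the NumPy >= 2.0 name; fall back to np.trapz on older versions.
trapz = getattr(np, "trapezoid", None) or np.trapz

npoints = 300
phi = np.linspace(-np.pi, np.pi, npoints)
r = np.linspace(0, 20, npoints)
phi_hd, r_hd = np.meshgrid(phi, r)

# Stand-in for the spline shape model: log10 flux of a circular Gaussian PSF.
log10_model = np.log10(np.exp(-0.5 * (r_hd / 2.0) ** 2) + 1e-12)

# Mask grid points that do not contribute to the PSF (the reverted code used a
# log10 flux cut at -3 plus a gradient criterion at large radii).
log10_model_ma = np.where(log10_model > -3, log10_model, -np.inf)

# Double integral with the trapezoidal rule: integrate over r (rows), then phi.
integral = trapz(
    trapz(10 ** log10_model_ma, r_hd[:, 0], axis=0), phi_hd[0, :], axis=0
)

# Dividing by the integral forces the sampled model to integrate to ~1; the
# reverted method instead rescaled `self.psf_w` by np.log10(integral) and then
# rebuilt the mean model.
normalized = 10 ** log10_model_ma / integral
check = trapz(trapz(normalized, r_hd[:, 0], axis=0), phi_hd[0, :], axis=0)
print(f"integral before: {integral:.4f}, after: {check:.4f}")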
src/psfmachine/tpf.py

Lines changed: 0 additions & 6 deletions
@@ -689,8 +689,6 @@ def load_shape_model(self, input=None, plot=False):
         self.rmax = hdu[1].header["rmax"]
         self.cut_r = hdu[1].header["cut_r"]
         self.psf_w = hdu[1].data["psf_w"]
-        # read from header if weights come from a normalized model.
-        self.normalized_shape_model = bool(hdu[1].header.get("normalized"))
         del hdu
 
         # create mean model, but PRF shapes from FFI are in pixels! and TPFMachine
@@ -750,10 +748,6 @@ def save_shape_model(self, output=None):
         )
         # spline degree is hardcoded in `_make_A_polar` implementation.
         table.header["spln_deg"] = (3, "Degree of the spline basis")
-        table.header["normalized"] = (
-            int(self.normalized_shape_model),
-            "Normalized weights",
-        )
 
         table.writeto(output, checksum=True, overwrite=True)

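The tpf.py changes drop the header keyword that recorded whether saved weights were normalized. For readers unfamiliar with the pattern, below is an illustrative sketch (file name, column, and values are made up, not psfmachine's) of that FITS-header round trip with astropy: write an integer flag with a comment on save, then read it back with `.get()` so files written before the keyword existed fall back to `False`.

# Illustrative sketch only: the save/load round trip removed by this revert.
import numpy as np
from astropy.io import fits

psf_w = np.arange(5, dtype=float)
table = fits.BinTableHDU.from_columns(
    [fits.Column(name="psf_w", format="D", array=psf_w)]
)
# Keywords longer than 8 characters are stored by astropy as HIERARCH cards.
table.header["normalized"] = (int(True), "Normalized weights")
table.writeto("shape_model_example.fits", checksum=True, overwrite=True)

hdu = fits.open("shape_model_example.fits")
# .get() returns None when the keyword is absent, so files written before the
# feature existed simply load as normalized_shape_model = False.
normalized_shape_model = bool(hdu[1].header.get("normalized"))
print(normalized_shape_model)
hdu.close()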