Skip to content

Commit

Permalink
Merge pull request #277 from roman-corgi/photon_counting
Browse files Browse the repository at this point in the history
Photon counting
  • Loading branch information
semaphoreP authored Feb 14, 2025
2 parents 9088894 + af86a28 commit 07f4f4d
Show file tree
Hide file tree
Showing 36 changed files with 1,453 additions and 114 deletions.
10 changes: 4 additions & 6 deletions corgidrp/caldb.py
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,7 @@ def _get_values_from_entry(self, entry, is_calib=True):
drp_version,
obsid,
naxis1,
naxis2,
naxis2
]

# rest are ext_hdr keys we can copy
Expand Down Expand Up @@ -243,7 +243,7 @@ def remove_entry(self, entry, to_disk=True):

def get_calib(self, frame, dtype, to_disk=True):
"""
Outputs the best calibration file of the given type for the input sciene frame.
Outputs the best calibration file of the given type for the input science frame.
Args:
frame (corgidrp.data.Image): an image frame to request a calibration for. If None is passed in, looks for the
Expand Down Expand Up @@ -288,14 +288,12 @@ def get_calib(self, frame, dtype, to_disk=True):
options = calibdf.loc[
(
(calibdf["EXPTIME"] == frame_dict["EXPTIME"])
& (calibdf["NAXIS1"] == frame_dict["NAXIS1"])
& (calibdf["NAXIS2"] == frame_dict["NAXIS2"])
)
]

if len(options) == 0:
raise ValueError("No valid Dark with EXPTIME={0} and dimension ({1},{2})"
.format(frame_dict["EXPTIME"], frame_dict["NAXIS1"], frame_dict["NAXIS2"]))
raise ValueError("No valid Dark with EXPTIME={0})"
.format(frame_dict["EXPTIME"]))

# select the one closest in time
result_index = np.abs(options["MJD"] - frame_dict["MJD"]).argmin()
Expand Down
55 changes: 34 additions & 21 deletions corgidrp/calibrate_kgain.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
from corgidrp.detector import slice_section, detector_areas

# Dictionary with constant kgain calibration parameters
kgain_params= {
kgain_params_default= {
# ROI constants
'rowroi1': 9,
'rowroi2': 1000,
Expand All @@ -30,34 +30,30 @@
'signal_bins_N': 400,
}

def check_kgain_params(
):
""" Checks integrity of kgain parameters in the dictionary kgain_params. """
if 'offset_colroi1' not in kgain_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
if 'offset_colroi2' not in kgain_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
def check_kgain_params(kgain_params):
""" Checks integrity of kgain parameters in the dictionary kgain_params.
Args:
kgain_params (dict): Dictionary of parameters used for calibrating the k gain.
"""

if 'rowroi1' not in kgain_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: rowroi1.')
if 'rowroi2' not in kgain_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: rowroi2.')
if 'colroi1' not in kgain_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: colroi1.')
if 'colroi2' not in kgain_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: colroi2.')
if 'rn_bins1' not in kgain_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: rn_bins1.')
if 'rn_bins2' not in kgain_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: rn_bins2.')
if 'max_DN_val' not in kgain_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: max_DN_val.')
if 'signal_bins_N' not in kgain_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: signal_bins_N.')

if not isinstance(kgain_params['offset_colroi1'], (float, int)):
raise TypeError('offset_colroi1 is not a number')
if not isinstance(kgain_params['offset_colroi2'], (float, int)):
raise TypeError('offset_colroi2 is not a number')
if not isinstance(kgain_params['rowroi1'], (float, int)):
raise TypeError('rowroi1 is not a number')
if not isinstance(kgain_params['rowroi2'], (float, int)):
Expand Down Expand Up @@ -281,7 +277,7 @@ def calibrate_kgain(dataset_kgain,
n_cal=10, n_mean=30, min_val=800, max_val=3000, binwidth=68,
make_plot=True,plot_outdir='figures', show_plot=False,
logspace_start=-1, logspace_stop=4, logspace_num=200,
verbose=False, detector_regions=None):
verbose=False, detector_regions=None, kgain_params=None):
"""
kgain (e-/DN) is calculated from the means and variances
within the defined minimum and maximum mean values. A photon transfer curve
Expand Down Expand Up @@ -344,12 +340,29 @@ def calibrate_kgain(dataset_kgain,
detector_regions (dict): a dictionary of detector geometry properties.
Keys should be as found in detector_areas in detector.py. Defaults to
that dictionary.
kgain_params (dict): (Optional) Dictionary containing row and col specifications
for the region of interest (indicated by 'rowroi1','rowroi2','colroi1',and 'colroi2').
        The 'roi' needs one square region specified, where a '1' ending indicates
        the smaller of two values, and a '2' ending indicates the larger
        of two values. The coordinates of the square region are specified by matching
        up as follows: (rowroi1, colroi1), (rowroi2, colroi1), etc.
Also must contain:
        'rn_bins1': lower bound of counts histogram for fitting of read noise
        'rn_bins2': upper bound of counts histogram for fitting of read noise
'max_DN_val': maximum DN value to be included in photon transfer curve (PTC)
'signal_bins_N': number of bins in the signal variables of PTC curve
Defaults to kgain_params_default included in this file.
Returns:
corgidrp.data.KGain: kgain estimate from the least-squares fit to the photon
transfer curve (in e-/DN). The expected value of kgain for EXCAM with
flight readout sequence should be between 8 and 9 e-/DN
"""
if kgain_params is None:
kgain_params = kgain_params_default

check_kgain_params(kgain_params)

if detector_regions is None:
detector_regions = detector_areas

Expand Down
60 changes: 39 additions & 21 deletions corgidrp/calibrate_nonlin.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
from corgidrp.calibrate_kgain import CalKgainException

# Dictionary with constant non-linearity calibration parameters
nonlin_params = {
nonlin_params_default = {
# ROI constants
'rowroi1': 305,
'rowroi2': 736,
Expand Down Expand Up @@ -44,41 +44,44 @@
'min_mask_factor': 1.1,
}

def check_nonlin_params(
):
""" Checks integrity of kgain parameters in the dictionary nonlin_params. """
def check_nonlin_params(nonlin_params):
""" Checks integrity of kgain parameters in the dictionary nonlin_params.
Args:
nonlin_params (dict): Dictionary of parameters used for calibrating nonlinearity.
"""
if 'rowroi1' not in nonlin_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: rowroi1.')
if 'rowroi2' not in nonlin_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: rowroi2.')
if 'colroi1' not in nonlin_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: colroi1.')
if 'colroi2' not in nonlin_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: colroi2.')
if 'rowback11' not in nonlin_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: rowback11.')
if 'rowback12' not in nonlin_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: rowback12.')
if 'rowback21' not in nonlin_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: rowback21.')
if 'rowback22' not in nonlin_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: rowback22.')
if 'colback11' not in nonlin_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: colback11.')
if 'colback12' not in nonlin_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: colback12.')
if 'colback21' not in nonlin_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: colback21.')
if 'colback22' not in nonlin_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: colback22.')
if 'min_exp_time' not in nonlin_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: min_exp_time.')
if 'num_bins' not in nonlin_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: num_bins.')
if 'min_bin' not in nonlin_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: min_bin.')
if 'min_mask_factor' not in nonlin_params:
raise ValueError('Missing parameter in directory pointer YAML file.')
raise ValueError('Missing parameter: min_mask_factor.')

if not isinstance(nonlin_params['rowroi1'], (float, int)):
raise TypeError('rowroi1 is not a number')
Expand Down Expand Up @@ -124,7 +127,7 @@ def calibrate_nonlin(dataset_nl,
pfit_upp_cutoff1 = -2, pfit_upp_cutoff2 = -3,
pfit_low_cutoff1 = 2, pfit_low_cutoff2 = 1,
make_plot=True, plot_outdir='figures', show_plot=False,
verbose=False):
verbose=False, nonlin_params=None):
"""
Function that derives the non-linearity calibration table for a set of DN
and EM values.
Expand Down Expand Up @@ -210,13 +213,28 @@ def calibrate_nonlin(dataset_nl,
show_plot (bool): (Optional) display the plots. Default is False.
verbose (bool): (Optional) display various diagnostic print messages.
Default is False.
nonlin_params (dict): (Optional) Dictionary of row and col specifications
for the region of interest (indicated by 'roi') where the frame is illuminated and for
two background regions (indicated by 'back1' and 'back2') where the frame is not illuminated.
Must contain 'rowroi1','rowroi2','colroi1','colroi2','rowback11','rowback12',
'rowback21','rowback22','colback11','colback12','colback21',and 'colback22'.
The 'roi' needs one square region specified, and 'back' needs two square regions,
where a '1' ending indicates the smaller of two values, and a '2' ending indicates the larger
of two values. The coordinates of each square are specified by matching
up as follows: (rowroi1, colroi1), (rowroi1, colroi2), (rowback11, colback11),
(rowback11, colback12), etc. Defaults to nonlin_params_default specified in this file.
Returns:
nonlin_arr (NonLinearityCalibration): 2-D array with nonlinearity values
for input signal level (DN) in rows and EM gain values in columns. The
input signal in DN is the first column. Signal values start with min_write
and run through max_write in steps of 20 DN.
"""
if nonlin_params is None:
nonlin_params = nonlin_params_default

check_nonlin_params(nonlin_params)

# dataset_nl.all_data must be 3-D
if np.ndim(dataset_nl.all_data) != 3:
raise Exception('dataset_nl.all_data must be 3-D')
Expand Down
2 changes: 1 addition & 1 deletion corgidrp/combine.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ def combine_images(data_subset, err_subset, dq_subset, collapse, num_frames_scal

    # dq collapse: keep all flags on
dq_collapse = np.bitwise_or.reduce(dq_subset, axis=0)
# except those pixels that have been replaced
# except for those pixels that have been replaced with good values
dq_collapse[np.where((dq_collapse > 0) & (~np.isnan(data_collapse)))] = 0

return data_collapse, err_collapse, dq_collapse
Expand Down
42 changes: 22 additions & 20 deletions corgidrp/darks.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,14 +105,14 @@ def mean_combine(image_list, bpmap_list, err=False):
sum_im += masked
map_im += (im_m.mask == False).astype(int)

if err: # sqrt of sum of sigma**2 terms
sum_im = np.sqrt(sum_im)

# Divide sum_im by map_im only where map_im is not equal to 0 (i.e.,
# not masked).
# Where map_im is equal to 0, set combined_im to zero
comb_image = np.divide(sum_im, map_im, out=np.zeros_like(sum_im),
where=map_im != 0)

if err: # (sqrt of sum of sigma**2 terms)/sqrt(n)
comb_image = np.sqrt(comb_image)

# Mask any value that was never mapped (aka masked in every frame)
comb_bpmap = (map_im == 0).astype(int)
Expand All @@ -133,12 +133,13 @@ def build_trad_dark(dataset, detector_params, detector_regions=None, full_frame=
- have had masks made for cosmic rays
- have been corrected for nonlinearity
- have been converted from DN to e-
- have been desmeared if desmearing is appropriate. Under normal
circumstances, darks should not be desmeared. The only time desmearing
would be useful is in the unexpected case that, for example,
dark current is so high that it stands far above other noise that is
not smeared upon readout, such as clock-induced charge,
fixed-pattern noise, and read noise.
- have NOT been desmeared. Darks should not be desmeared. The only component
of dark frames that would be subject to a smearing effect is dark current
since it linearly increases with time, so the extra row read time affects
the dark current per pixel. However, illuminated images
would also contain this smeared dark current, so dark subtraction should
remove this smeared dark current (and then desmearing may be applied to the
processed image if appropriate).
Also, add_photon_noise() should NOT have been applied to the frames in
dataset. And note that creation of the
Expand Down Expand Up @@ -268,12 +269,13 @@ def calibrate_darks_lsq(dataset, detector_params, detector_regions=None):
- have had masks made for cosmic rays
- have been corrected for nonlinearity
- have been converted from DN to e-
- have been desmeared if desmearing is appropriate. Under normal
circumstances, darks should not be desmeared. The only time desmearing
would be useful is in the unexpected case that, for example,
dark current is so high that it stands far above other noise that is
not smeared upon readout, such as clock-induced charge,
fixed-pattern noise, and read noise.
- have NOT been desmeared. Darks should not be desmeared. The only component
of dark frames that would be subject to a smearing effect is dark current
since it linearly increases with time, so the extra row read time affects
the dark current per pixel. However, illuminated images
would also contain this smeared dark current, so dark subtraction should
remove this smeared dark current (and then desmearing may be applied to the
processed image if appropriate).
Also, add_photon_noise() should NOT have been applied to the frames in
dataset. And note that creation of the
Expand Down Expand Up @@ -329,7 +331,7 @@ def calibrate_darks_lsq(dataset, detector_params, detector_regions=None):
output Dark's dq after assigning these pixels a flag value of 256.
They should have large err values.
The pixels that are masked for EVERY frame in all sub-stacks
but 4 (or less) are assigned a flag value of
but 3 (or less) are assigned a flag value of
1, which falls under the category of "Bad pixel - unspecified reason".
These pixels would have no reliability for dark subtraction.
Expand Down Expand Up @@ -415,7 +417,7 @@ def calibrate_darks_lsq(dataset, detector_params, detector_regions=None):
output Dark's dq after assigning these pixels a flag value of 256.
They should have large err values.
The pixels that are masked for EVERY frame in all sub-stacks
but 4 (or less) are assigned a flag value of
but 3 (or less) are assigned a flag value of
1, which falls under the category of "Bad pixel - unspecified reason".
These pixels would have no reliability for dark subtraction.
FPN_std_map : array-like (full frame)
Expand Down Expand Up @@ -522,7 +524,7 @@ def calibrate_darks_lsq(dataset, detector_params, detector_regions=None):
# flag value of 256; unreliable pixels, large err
output_dq = (unreliable_pix_map >= len(datasets)-3).astype(int)*256
# flag value of 1 for those that are masked all the way through for all
# but 4 (or less) stacks; this overwrites the flag value of 256 that was assigned to
# but 3 (or less) stacks; this overwrites the flag value of 256 that was assigned to
# these pixels in previous line
unfittable_ind = np.where(unfittable_pix_map >= len(datasets)-3)
output_dq[unfittable_ind] = 1
Expand Down Expand Up @@ -577,7 +579,7 @@ def calibrate_darks_lsq(dataset, detector_params, detector_regions=None):
# input data error comes from .err arrays; could use this for error bars
# in input data for weighted least squares, but we'll just simply get the
# std error and add it in quadrature to least squares fit standard dev
stacks_err = np.sqrt(np.sum(mean_err_stack**2, axis=0))/len(mean_err_stack)
stacks_err = np.sqrt(np.sum(mean_err_stack**2, axis=0)/np.sqrt(len(mean_err_stack)))

# matrix to be used for least squares and covariance matrix
X = np.array([np.ones([len(EMgain_arr)]), EMgain_arr, EMgain_arr*exptime_arr]).T
Expand Down Expand Up @@ -865,5 +867,5 @@ def build_synthesized_dark(dataset, noisemaps, detector_regions=None, full_frame

master_dark = Dark(md_data, prihdr, exthdr, input_data, md_noise, FDCdq,
errhdr)

return master_dark
Loading

0 comments on commit 07f4f4d

Please sign in to comment.