diff --git a/.gitattributes b/.gitattributes index 7df623c2..21a8d3c8 100644 --- a/.gitattributes +++ b/.gitattributes @@ -3,3 +3,4 @@ tests/test_data/medcombined-neptune_band_1.fits filter=lfs diff=lfs merge=lfs -t tests/test_data/medcombined-neptune_band_4.fits filter=lfs diff=lfs merge=lfs -text tests/test_data/medcombined-uranus_band_4.fits filter=lfs diff=lfs merge=lfs -text tests/test_data/medcombined-uranus_band_1.fits filter=lfs diff=lfs merge=lfs -text +tests/test_data/mock_northup.fits filter=lfs diff=lfs merge=lfs -text diff --git a/README.md b/README.md index 5260b162..fb63ec85 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,15 @@ That configuration directory will be used to locate things on your computer such ### For Developers -Large binary files (used in tests) are stored in Git LFS. You may need to run `git lfs pull` after checking out the repository to download the latest large binary files, or the unit tests may fail. +Large binary files (used in tests) are stored in Git LFS. [Install Git LFS](https://docs.github.com/en/repositories/working-with-files/managing-large-files/installing-git-large-file-storage) if it isn't already installed. You may need to run `git lfs pull` after checking out the repository to download the latest large binary files, or the unit tests may fail. + +To run the existing end-to-end tests, you also need the II&T code, which is used directly for comparing results. This also requires Git LFS to be installed first. Then install the II&T code by doing the following while in the top-level folder: + +``` +pip install -r requirements_e2etests.txt corgidrp +``` + +This will install the II&T repositories `cal` and `proc_cgi_frame`. ### Troubleshooting @@ -87,11 +95,11 @@ def example_step(dataset, calib_data, tuneable_arg=1, another_arg="test"): Inside the function can be nearly anything you want, but the function signature and start/end of the function should follow a few rules. 
- * Each function should include a docstring that descibes what the function is doing, what the inputs (including units if appropriate) are and what the outputs (also with units). The dosctrings should be [goggle style docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html). + * Each function should include a docstring that describes what the function is doing, what the inputs (including units if appropriate) are and what the outputs (also with units). The docstrings should be [google style docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html). * The input dataset should always be the first input * Additional arguments and keywords exist only if you need them--many relevant parameters might already by in Dataset headers. A pipeline step can only have a single argument (the input dataset) if needed. * All additional function arguments/keywords should only consist of the following types: int, float, str, or a class defined in corgidrp.Data. - * (Long explaination for the curious: The reason for this is that pipeline steps can be written out as text files. Int/float/str are easily represented succinctly by textfiles. All classes in corgidrp.Data can be created simply by passing in a filepath. Therefore, all pipeline steps have easily recordable arguments for easy reproducibility.) + * (Long explanation for the curious: The reason for this is that pipeline steps can be written out as text files. Int/float/str are easily represented succinctly by textfiles. All classes in corgidrp.Data can be created simply by passing in a filepath. Therefore, all pipeline steps have easily recordable arguments for easy reproducibility.) + * The first line of the function generally should be creating a copy of the dataset (which will be the output dataset). This way, the output dataset is not the same instance as the input dataset. This will make it easier to ensure reproducibility. 
* The function should always end with updating the header and (typically) the data of the output dataset. The history of running this pipeline step should be recorded in the header. @@ -120,12 +128,12 @@ End-to-end testing refers to processing data as one would when we get the real d 1. Write a recipe that produces the desired processed data product starting from L1 data. You will need to determine the series of step functions that need to be run, and what kind of arguments should be modified (e.g., whether prescan columns pixels should be cropped). Refer to the existing recipes in `corgidrp/recipe_templates` as examples and double check all the necessary steps in the FDD. 2. Obtain TVAC L1 data from our Box folder (ask Alex Greenbaum or Jason if you don't have access). For some situations (e.g., boresight), there may not be appropriate TVAC data. In those cases, write a piece of code that uses the images from TVAC to provide realistic noise and add it to mock data (i.e., the ones generated for the unit testing) to create mock L1 data. 3. Write an end-to-end test that processes the L1 data through the new recipe you created using the corgidrp.walker framework - - You will probably need to modify the `corgidrp.walker.guess_template()` function to add logic for determining when to use your recipe based on header keywords (e.g., OBSTYPE). Ask Jason, who developed this framework, if it is not clear what should be done. + - You will probably need to modify the `corgidrp.walker.guess_template()` function to add logic for determining when to use your recipe based on header keywords (e.g., VISTYPE). Ask Jason, who developed this framework, if it is not clear what should be done. - Your recipe may require other calibratio files. For now, create them as part of the setup process during the script (see `tests/e2e_tests/l1_to_l2b_e2e.py` for examples of how to do this for each type of calibration) - if you need to create mock L1 data, please do it in the script as well. 
- See the existing tests in `tests/e2e_tests/` for how to structure this script. You should only need to write a single script. 4. Test that the script runs successfully on your local machine and produces the expected output. Debug as necessary. When appropriate, test your results against those obtained from the II&T/TVAC pipeline using the same input data. - 5. Determine how resource intensive your recipe is. There are many ways to do this, but Linux users can run `/usr/bin/time -v python your_e2e_test.py` and Mac userse can run `/usr/bin/time -l -h -p python `. Record elapsed (wall clock) time, the percent of CPU this job got (only if parallelization was used), and total memory used (labelled "Maximum resident set size"). + 5. Determine how resource intensive your recipe is. There are many ways to do this, but Linux users can run `/usr/bin/time -v python your_e2e_test.py` and Mac users can run `/usr/bin/time -l -h -p python `. Record elapsed (wall clock) time, the percent of CPU this job got (only if parallelization was used), and total memory used (labelled "Maximum resident set size"). 6. Document your recipe on the "Corgi-DRP Implementation Document" on Confluence (see the big table in Section 2.0). You should fill out an entire row with your recipe. Under addition notes, note if your recipe took significant run time (> 1 minute) and significant memory (> 1 GB). 7. PR! @@ -135,19 +143,6 @@ To run the existing end-to-end tests, you need to have downloaded all the TVAC d pytest --which e2e --tvacdata_path /path/to/CGI_TVAC_Data --e2eoutput_path tests/e2e_tests/ tests/e2e_tests/ ``` -To run the existing end-to-end tests, you also need the II&T code, which is used directly for comparing results. First install Git LFS if it isn't already installed (https://docs.github.com/en/repositories/working-with-files/managing-large-files/installing-git-large-file-storage). 
Then install the II&T code by doing the following while in the top-level folder: - -``` -pip install -r requirements_e2etests.txt corgidrp -``` - -This will install the II&T repositories `cal` and `proc_cgi_frame`. To ensure the version from cgi_iit_drp (https://github.com/roman-corgi/cgi_iit_drp) is installed, you may have to do the following first: - -``` -pip uninstall proc_cgi_frame -pip uninstall cal -``` - ### Linting In addition to unit tests, your code will need to pass a static analysis before being merged. `corgidrp` currently runs a subset of flake8 tests, which you can replicate on your local system by running: @@ -180,21 +175,11 @@ Before creating a pull request, review the design Principles below. Use the Gith ## FAQ - * Does my pipeline function need to save files? - * Files will be saved by a higher level pipeline code. As long as you output an object that's an instance of a `corgidrp.Data` class, it will have a `save()` function that will be used. - * Can I create new data classes? - * Yes, you can feel free to make new data classes. Generally, they should be a subclass of the `Image` class, and you can look at the `Dark` class as an example. Each calibration type should have its own `Image` subclass defined. Talk with Jason and Max to discuss how your class should be implemented! - * You do not necessarily need to write a copy function for subclasses of the `Image` class. If you need to copy calibration objects at all you can import and apply the copy module of python, see - example: - ``` - import copy - flatfield = data.Flatfield('flatfield.fits') - #reference copy - flatfield_copy = copy.copy(flatfield) - #deep data copy - flatfield_copy = copy.deepcopy(flatfield) - ``` - +* Does my pipeline function need to save files? + * Files will be saved by a higher level pipeline code. As long as you output an object that's an instance of a `corgidrp.Data` class, it will have a `save()` function that will be used. +* Can I create new data classes? 
+ * Yes, you can feel free to make new data classes. Generally, they should be a subclass of the `Image` class, and you can look at the `Dark` class as an example. Each calibration type should have its own `Image` subclass defined. Talk with Jason and Max to discuss how your class should be implemented! + * You do not necessarily need to write a copy function for subclasses of the `Image` class. If you need to copy calibration objects at all, you can use the copy function of the Image class. * What python version should I develop in? * Python 3.12 @@ -214,7 +199,20 @@ Before creating a pull request, review the design Principles below. Use the Gith ## Change Log -**v1.0** (To be released..) +**v1.1.2** + * Flat field correction marks pixels divided by 0 as bad + +**v1.1.1** + * Fix unit test that wasn't cleaning up environment properly + +**v1.1** + * Bug fix so that corgidrp classes can be pickled + * New corgidrp.ops interface + * Improved agreement with II&T pipeline in updated e2e tests + * Ability to embed just the illuminated part of the detector back into a full engineering frame  + * Updated DRP throughout to handle recently updated data header specification + +**v1.0** * First official pipeline release! * Step functions to produce the necessary calibration files for analog L1 to L2b step functions implemented and tested * Step function to produce boresight calibration implemented and tested diff --git a/corgidrp/__init__.py b/corgidrp/__init__.py index 30ed59b4..515571e4 100644 --- a/corgidrp/__init__.py +++ b/corgidrp/__init__.py @@ -3,7 +3,7 @@ import pathlib import configparser -__version__ = "1.0" +__version__ = "1.1.2" version = __version__ # temporary backwards compatability #### Create a configuration file for the corgidrp if it doesn't exist. 
diff --git a/corgidrp/calibrate_kgain.py b/corgidrp/calibrate_kgain.py index 8a2ef1fe..405ba47d 100644 --- a/corgidrp/calibrate_kgain.py +++ b/corgidrp/calibrate_kgain.py @@ -283,36 +283,37 @@ def calibrate_kgain(dataset_kgain, logspace_start=-1, logspace_stop=4, logspace_num=200, verbose=False, detector_regions=None): """ - Given an array of frame stacks for various exposure times, each sub-stack - having at least 5 illuminated pupil L1 SCI-size frames having the same - exposure time. The frames are bias-subtracted, and in addition, if EM gain - is >1 for the input data for calibrate_kgain, EM gain division is also needed. - It also creates a mean pupil array from a separate stack of - frames of uniform exposure time. The mean pupil array is scaled to the mean - of each stack and statistics (mean and std dev) are calculated for bins from - the frames in it. kgain (e-/DN) is calculated from the means and variances + kgain (e-/DN) is calculated from the means and variances within the defined minimum and maximum mean values. A photon transfer curve is plotted from the std dev and mean values from the bins. - + Args: - dataset_kgain (corgidrp.Dataset): Dataset with a set of of EXCAM illuminated - pupil L1 SCI frames (counts in DN) having a range of exp times. - datset_cal contains a set of subset of frames, and all subsets must have - the same number of frames, which is a minimum of 5. The frames in a subset - must all have the same exposure time. There must be at least 10 subsets - (More than 20 sub-stacks recommended. The mean signal in the pupil region should - span from about 100 to about 10000 DN. - In addition, dataset_kgain contains a set of at least 30 frames used to - build a mean frame. 
All the frames must have the same exposure time, - such that the net mean counts in the pupil region is a few thousand DN - (2000 to 4000 DN recommended; - notice that unity EM gain is recommended when k-gain is the primary desired - product, since it is known more accurately than non-unity values. This - mean frame is used to select pixels with similar illumination for - calculating variances (since the pupil illumination is not perfectly uniform). - All data must be obtained under the same positioning of the pupil - relative to the detector. These frames are identified with the kewyord - 'OBSTYPE'='MNFRAME' (TBD). + dataset_kgain (corgidrp.Dataset): The frames in the dataset are + bias-subtracted. The dataset contains frames belonging to two different + sets -- Mean frame and a large array of unity gain frames. + Mean frame: Unity gain frames with constant exposure time. These frames + are used to create a mean pupil image. The mean frame is used to select + pixels in each frame of the large array of unity gain frames (see next) + to calculate its mean signal. In general, it is expected that at least + 30 frames or more will be taken for this set. In TVAC, 30 frames, each + with an exposure time of 5.0 sec were taken. + Large array of unity gain frames: Set of unity gain frames with subsets + of equal exposure times. Data for each subset should be taken sequentially: + Each subset must have at least 5 frames. All frames for a subset are taken + before moving to the next subset. Two of the subsets have the same (repeated) + exposure time. These two subsets are not contiguous: The first subset is + taken near the start of the data collection and the second one is taken + at the end of the data collection (see TVAC example below). The mean + signal of these two subsets is used to correct for illumination + brightness/sensor sensitivity drifts for all the frames in the whole set, + depending on when the frames were taken. 
There should be no other repeated + exposure time among the subsets. In TVAC, a total of 110 frames were taken + within this category. The 110 frames consisted of 22 subsets, each with + 5 frames. All 5 frames had the same exposure time. The exposure times in + TVAC in seconds were, each repeated 5 times to collect 5 frames in each + subset -- 0.077, 0.770, 1.538, 2.308, 3.077, 3.846, 4.615, 5.385, 6.154, + 6.923, 7.692, 8.462, 9.231, 10.000, 11.538, 10.769, 12.308, 13.077, + 13.846, 14.615, 15.385, and 1.538 (again). n_cal (int): Minimum number of sub-stacks used to calibrate K-Gain. The default value is 10. diff --git a/corgidrp/calibrate_nonlin.py b/corgidrp/calibrate_nonlin.py index 31623f2e..199ed826 100644 --- a/corgidrp/calibrate_nonlin.py +++ b/corgidrp/calibrate_nonlin.py @@ -126,54 +126,53 @@ def calibrate_nonlin(dataset_nl, make_plot=True, plot_outdir='figures', show_plot=False, verbose=False): """ - Given a large array of stacks with 1 or more EM gains, and sub-stacks of - frames ranging over exposure time, each sub-stack having at least 1 illuminated - pupil SCI-sized L1 frame for each exposure time, this function processes the - frames to create a nonlinearity table. A mean pupil array is created from a - separate stack of frames of constant exposure time and used to make a mask; - the mask is used to select pixels in each frame in the large array of stacks - in order to calculate its mean signal. + Function that derives the non-linearity calibration table for a set of DN + and EM values. - The frames are bias-subtracted. - - Two sub-stacks/groups of frames at each EM gain value contain noncontiguous - frames with the same (repeated) exposure time, taken near the start and end - of the frame sequence. Their mean signals are computed and used to correct for - illumination brightness/sensor sensitivity drifts for all the frames for a - given EM gain, depending on when the frames were taken. 
The repeated exposure - time frames should only be repeated once (as opposed to 3 times, etc) and - other sets of exposure times for each EM gain should not be repeated. - Note, it is assumed that the frames for the large array of stacks are - collected in a systematic way, such that frames having the same exposure - time for a given EM gain are collected contiguously (with the exception of - the repeated group of frames noted above). The frames within each EM gain - group must also be time ordered. For best results, the mean signal in the - pupil region for the longest exposure time at each EM gain setting should - be between 8000 and 10000 DN. - A linear fit is applied to the corrected mean signals versus exposure time. - Relative gain values are calculated from the ratio of the mean signals - to the linear fit. Nonlinearity is then calculated from the inverse of - the relative gain and output as an array. The nonlinearity values, along with - the actual EM gain for each column and mean counts in DN for each row, are - returned as two arrays. One array contains the column headers with - actual/measured EM gain, and the other array contains the means in DN and the - nonlinearity values. The mean values start with min_write and run through - max_write. - Args: - dataset_nl (corgidrp.Dataset): dataset, which is implicitly - subdivided into smaller ranges of grouped frames. The frames are EXCAM - illuminated pupil L1 SCI frames. There must be one or more unique EM - gain values and at least 20 unique exposure times for each EM gain. The - number of frames for each EM gain can vary. The size of dataset_cal is: - Sum(N_t[g]) x 1200 x 2200, where N_t[g] is the number of frames having - EM gain value g, and the sum is over g. Each substack of dataset_cal must - have a group of frames with a repeated exposure time. In addition, there's - a set of at least 30 frames used to generate a mean frame. 
These frames - have the same exp time, such that the mean signal in the pupil regions - is a few thousand DN, which helps identify the pixels containing the - pupil image. They also have unity EM gain. These frames are - identified with the kewyord 'OBSTYPE'='MNFRAME' (TBD). + dataset_nl (corgidrp.Dataset): The frames in the dataset are + bias-subtracted. The dataset contains frames belonging to three different + sets -- Mean frame, a large array of unity gain frames, and set with + non-unity gain frames. + Mean frame -- Unity gain frames with constant exposure time. These frames + are used to create a mean pupil image. The mean frame is used to select + pixels in each frame of the large array of unity gain frames (see next) + to calculate its mean signal. In general, it is expected that at least + 30 frames or more will be taken for this set. In TVAC, 30 frames, each + with an exposure time of 5.0 sec were taken. + Large array of unity gain frames: Set of unity gain frames with subsets + of equal exposure times. Data for each subset should be taken sequentially: + Each subset must have at least 5 frames. All frames for a subset are taken + before moving to the next subset. Two of the subsets have the same (repeated) + exposure time. These two subsets are not contiguous: The first subset is + taken near the start of the data collection and the second one is taken + at the end of the data collection (see TVAC example below). The mean + signal of these two subsets is used to correct for illumination + brightness/sensor sensitivity drifts for all the frames in the whole set, + depending on when the frames were taken. There should be no other repeated + exposure time among the subsets. In TVAC, a total of 110 frames were taken + within this category. The 110 frames consisted of 22 subsets, each with + 5 frames. All 5 frames had the same exposure time. 
The exposure times in + TVAC in seconds were, each repeated 5 times to collect 5 frames in each + subset -- 0.077, 0.770, 1.538, 2.308, 3.077, 3.846, 4.615, 5.385, 6.154, + 6.923, 7.692, 8.462, 9.231, 10.000, 11.538, 10.769, 12.308, 13.077, + 13.846, 14.615, 15.385, and 1.538 (again). + Set with non-unity gain frames:: a set of subsets of frames. All frames + in each subset have a unique, non-unity EM gain. For instance, in TVAC, + 11 subsets were considered with EM values (CMDGAIN): 1.65, 5.24, 8.60, + 16.70, 27.50, 45.26, 87.50, 144.10, 237.26, 458.70 and 584.40. These + correspond to a range of actual EM gains from about 2 to 7000. Each subset + collects the same number of frames, which is at least 20 frames. In TVAC, + each non-unity EM value had 22 frames. In each subset, there are two + repeated exposure times: one near the start of the data collection and + one at the very end. The exposure times of the frames in each EM subset + do not need to be the same. For EM=1.65, the values of the exposure times + in seconds were: 0.076, 0.758, 1.515, 2.273, 3.031, 3.789, 4.546, 5.304, + 6.062, 6.820, 7.577, 8.335, 9.093, 9.851, 10.608, 11.366, 12.124, 12.881, + 13.639, 14.397, 15.155, and 1.515 (repeated). And for EM=5.24, the 22 + values of the exposure times in seconds were: 0.070, 0.704, 1.408, 2.112, + 2.816, 3.520, 4.225, 4.929, 5.633, 6.337, 7.041, 7.745, 8.449, 9.153, + 9.857, 10.561, 11.265, 11.969, 12.674, 13.378, 14.082, and 1.408 (repeated). n_cal (int): Minimum number of sub-stacks used to calibrate Non-Linearity. The default value is 20. 
@@ -246,15 +245,15 @@ def calibrate_nonlin(dataset_nl, raise TypeError('cal_arr must be an ndarray.') if np.ndim(cal_arr) != 3: raise CalNonlinException('cal_arr must be 3-D') - # mean_frame_arr must have at least 30 frames - if len(cal_arr) < n_cal: - raise Exception(f'mean_frame_arr must have at least {n_cal} frames') - if np.sum(len_list) != len(cal_arr): - raise CalNonlinException('Number of sub-stacks in cal_arr must ' - 'equal the sum of the elements in len_list') if len(len_list) < 1: raise CalNonlinException('Number of elements in len_list must ' 'be greater than or equal to 1.') + if np.sum(len_list) != len(cal_arr): + raise CalNonlinException('Number of sub-stacks in cal_arr must ' + 'equal the sum of the elements in len_list') + # cal_arr must have at least 20 frames for each EM gain + if np.any(np.array(len_list) < n_cal): + raise Exception(f'cal_arr must have at least {n_cal} frames for each EM value') if len(np.unique(datetime_arr)) != len(datetime_arr): raise CalNonlinException('All elements of datetime_arr must be unique.') for g_index in range(len(len_list)): @@ -274,6 +273,7 @@ def calibrate_nonlin(dataset_nl, if np.ndim(mean_frame_arr) != 3: raise CalNonlinException('mean_frame_arr must be 3-D (i.e., a stack of ' '2-D sub-stacks') + # mean_frame_arr must have at least 30 frames if len(mean_frame_arr) < n_mean: raise CalNonlinException(f'Number of frames in mean_frame_arr must ' 'be at least {n_mean}.') @@ -808,8 +808,7 @@ def calibrate_nonlin(dataset_nl, prhd = dataset_nl.frames[0].pri_hdr exthd = dataset_nl.frames[0].ext_hdr exthd['HISTORY'] = f"Non-linearity calibration derived from a set of frames on {exthd['DATETIME']}" - # Just for the purpose of getting the instance created. 
NEED to clarify the - # role of nonlin_arr3 and headers compared to data.NonLinearityCalibration.data + # Just for the purpose of getting the instance created nonlin = data.NonLinearityCalibration(nonlin_data, pri_hdr = prhd, ext_hdr = exthd, input_dataset=dataset_nl) @@ -867,7 +866,8 @@ def nonlin_dataset_2_stack(dataset): if frame.ext_hdr['CMDGAIN'] != 1: raise Exception('The commanded gain used to build the mean frame must be unity') mean_frame_stack.append(frame.data) - else: + elif (frame.pri_hdr['OBSTYPE'] == 'KGAIN' or + frame.pri_hdr['OBSTYPE'] == 'NONLIN'): len_cal_frames += 1 sub_stack.append(frame.data) exp_time = frame.ext_hdr['EXPTIME'] @@ -877,6 +877,7 @@ def nonlin_dataset_2_stack(dataset): raise Exception('Exposure times must be positive') exp_times.append(exp_time) datetime = frame.ext_hdr['DATETIME'] + if isinstance(datetime, str) is False: raise Exception('DATETIME must be a string') datetimes.append(datetime) @@ -889,9 +890,13 @@ def nonlin_dataset_2_stack(dataset): except: # use commanded gain otherwise gains.append(frame.ext_hdr['CMDGAIN']) record_gain = False + else: + raise Exception('OBSTYPE can only be MNFRAME or NONLIN in non-linearity') + # First layer (array of unique EM values) - stack.append(np.stack(sub_stack)) - len_sstack.append(len_cal_frames) + if len(sub_stack): + stack.append(np.stack(sub_stack)) + len_sstack.append(len_cal_frames) # All elements of datetimes must be unique if len(datetimes) != len(set(datetimes)): diff --git a/corgidrp/combine.py b/corgidrp/combine.py index 6bc38b9e..3235db97 100644 --- a/corgidrp/combine.py +++ b/corgidrp/combine.py @@ -32,7 +32,7 @@ def combine_images(data_subset, err_subset, dq_subset, collapse, num_frames_scal n_samples = np.sum(n_samples, axis=0) if collapse.lower() == "mean": data_collapse = np.nanmean(data_subset, axis=0) - err_collapse = np.sqrt(np.nanmean(err_subset**2, axis=0)) /np.sqrt(n_samples) # not sure if this is correct, but good enough for now + err_collapse = 
np.sqrt(np.nanmean(err_subset**2, axis=0)) /np.sqrt(n_samples) # correct assuming standard error propagation elif collapse.lower() == "median": data_collapse = np.nanmedian(data_subset, axis=0) err_collapse = np.sqrt(np.nanmean(err_subset**2, axis=0)) /np.sqrt(n_samples) * np.sqrt(np.pi/2) # inflate median error @@ -72,7 +72,7 @@ def combine_subexposures(input_dataset, num_frames_per_group=None, collapse="mea num_frames_per_group = len(input_dataset) if len(input_dataset) % num_frames_per_group != 0: - raise ValueError("Input dataset of lenght {0} cannot be grouped in sets of {1}".format(len(input_dataset, num_frames_per_group))) + raise ValueError("Input dataset of length {0} cannot be grouped in sets of {1}".format(len(input_dataset), num_frames_per_group)) if collapse.lower() not in ["mean", "median"]: raise ValueError("combine_subexposures can only collapse with mean or median") @@ -82,7 +82,7 @@ def combine_subexposures(input_dataset, num_frames_per_group=None, collapse="mea for i in range(num_groups): data_subset = np.copy(input_dataset.all_data[num_frames_per_group*i:num_frames_per_group*(i+1)]) err_subset = np.copy(input_dataset.all_err[num_frames_per_group*i:num_frames_per_group*(i+1)]) - dq_subset = input_dataset.all_dq[num_frames_per_group*i:num_frames_per_group*(i+1)] + dq_subset = np.copy(input_dataset.all_dq[num_frames_per_group*i:num_frames_per_group*(i+1)]) data_collapse, err_collapse, dq_collapse = combine_images(data_subset, err_subset, dq_subset, collapse=collapse, num_frames_scaling=num_frames_scaling) @@ -91,7 +91,7 @@ def combine_subexposures(input_dataset, num_frames_per_group=None, collapse="mea pri_hdr = input_dataset[num_frames_per_group*i].pri_hdr.copy() ext_hdr = input_dataset[num_frames_per_group*i].ext_hdr.copy() err_hdr = input_dataset[num_frames_per_group*i].err_hdr.copy() - dq_hdr = input_dataset[num_frames_per_group*i].err_hdr.copy() + dq_hdr = input_dataset[num_frames_per_group*i].dq_hdr.copy() hdulist = 
input_dataset[num_frames_per_group*i].hdu_list.copy() new_image = data.Image(data_collapse, pri_hdr=pri_hdr, ext_hdr=ext_hdr, err=err_collapse, dq=dq_collapse, err_hdr=err_hdr, dq_hdr=dq_hdr, input_hdulist=hdulist) diff --git a/corgidrp/data.py b/corgidrp/data.py index 35adfd4f..b108f5ae 100644 --- a/corgidrp/data.py +++ b/corgidrp/data.py @@ -4,7 +4,7 @@ import astropy.io.fits as fits import astropy.time as time import pandas as pd - +import copy import corgidrp class Dataset(): @@ -491,25 +491,12 @@ def copy(self, copy_data=True): corgidrp.data.Image: a copy of this Image """ if copy_data: - new_data = np.copy(self.data) - new_err = np.copy(self.err) - new_dq = np.copy(self.dq) - new_hdulist = self.hdu_list.copy() + new_img = copy.deepcopy(self) else: - new_data = self.data # this is just pointer referencing - new_err = self.err - new_dq = self.dq - new_hdulist = self.hdu_list - new_img = Image(new_data, pri_hdr=self.pri_hdr.copy(), ext_hdr=self.ext_hdr.copy(), err = new_err, dq = new_dq, - input_hdulist = new_hdulist, err_hdr = self.err_hdr.copy(), dq_hdr = self.dq_hdr.copy()) - - # annoying, but we got to manually update some parameters. Need to keep track of which ones to update - new_img.filename = self.filename - new_img.filedir = self.filedir - + new_img = copy.copy(self) # update DRP version tracking - self.ext_hdr['DRPVERSN'] = corgidrp.__version__ - self.ext_hdr['DRPCTIME'] = time.Time.now().isot + new_img.ext_hdr['DRPVERSN'] = corgidrp.__version__ + new_img.ext_hdr['DRPCTIME'] = time.Time.now().isot return new_img @@ -658,37 +645,6 @@ def __init__(self, data_or_filepath, pri_hdr=None, ext_hdr=None, input_dataset=N if self.ext_hdr['DATATYPE'] != 'Dark': raise ValueError("File that was loaded was not a Dark file.") - def copy(self, copy_data = True): - """ - Make a copy of this Dark file, including data and headers. 
- Data copying can be turned off if you only want to modify the headers - Headers should always be copied as we should modify them any time we make new edits to the data - - Args: - copy_data (bool): (optional) whether the data should be copied. Default is True - - Returns: - new_nm (corgidrp.data.NoiseMap): a copy of this Dark - """ - if copy_data: - new_data = np.copy(self.data) - new_err = np.copy(self.err) - new_dq = np.copy(self.dq) - else: - new_data = self.data # this is just pointer referencing - new_err = self.err - new_dq = self.dq - new_dark = Dark(new_data, pri_hdr=self.pri_hdr.copy(), ext_hdr=self.ext_hdr.copy(), err = new_err, dq = new_dq, err_hdr = self.err_hdr.copy()) - - # annoying, but we got to manually update some parameters. Need to keep track of which ones to update - new_dark.filename = self.filename - new_dark.filedir = self.filedir - - # update DRP version tracking - self.ext_hdr['DRPVERSN'] = corgidrp.version - self.ext_hdr['DRPCTIME'] = time.Time.now().isot - - return new_dark class FlatField(Image): """ @@ -910,40 +866,6 @@ def value(self): @property def error(self): return self._kgain_error - - def copy(self, copy_data = True): - """ - Make a copy of this KGain file. including data and headers. - Data copying can be turned off if you only want to modify the headers - Headers should always be copied as we should modify them any time we make new edits to the data - - Args: - copy_data (bool): (optional) whether the data should be copied. 
Default is True - - Returns: - corgidrp.data.KGain: a copy of this KGain - """ - if copy_data: - new_data = np.copy(self.data) - new_ptc = np.copy(self.ptc) - new_err = np.copy(self.err) - else: - new_data = self.data # this is just pointer referencing - new_ptc = self.ptc - new_err = np.copy(self.err) - - new_kg = KGain(new_data, err = new_err, ptc = new_ptc, pri_hdr=self.pri_hdr.copy(), ext_hdr=self.ext_hdr.copy(), err_hdr = self.err_hdr.copy(), ptc_hdr = self.ptc_hdr.copy()) - - # annoying, but we got to manually update some parameters. Need to keep track of which ones to update - new_kg.filename = self.filename - new_kg.filedir = self.filedir - - # update DRP version tracking - self.ext_hdr['DRPVERSN'] = corgidrp.__version__ - self.ext_hdr['DRPCTIME'] = time.Time.now().isot - - return new_kg - def save(self, filedir=None, filename=None): """ @@ -1019,33 +941,6 @@ def __init__(self, data_or_filepath, pri_hdr=None, ext_hdr=None, input_dataset=N if self.ext_hdr['DATATYPE'] != 'BadPixelMap': raise ValueError("File that was loaded was not a BadPixelMap file.") - def copy(self, copy_data = True): - """ - Make a copy of this BadPixelMap file. including data and headers. - Data copying can be turned off if you only want to modify the headers - Headers should always be copied as we should modify them any time we make new edits to the data - - Args: - copy_data (bool): (optional) whether the data should be copied. Default is True - - Returns: - corgidrp.data.BadPixelMap: a copy of this BadPixelMap - """ - if copy_data: - new_data = np.copy(self.data) - else: - new_data = self.data # this is just pointer referencing - new_bp = BadPixelMap(new_data, pri_hdr=self.pri_hdr.copy(), ext_hdr=self.ext_hdr.copy()) - - # we got to manually update some parameters. 
Need to keep track of which ones to update - new_bp.filename = self.filename - new_bp.filedir = self.filedir - - # update DRP version tracking - self.ext_hdr['DRPVERSN'] = corgidrp.__version__ - self.ext_hdr['DRPCTIME'] = time.Time.now().isot - - return new_bp class DetectorNoiseMaps(Image): """ @@ -1126,39 +1021,6 @@ def __init__(self, data_or_filepath, pri_hdr=None, ext_hdr=None, input_dataset=N self.CIC_err = self.err[0][1] self.DC_err = self.err[0][2] - - def copy(self, copy_data = True): - """ - Make a copy of this DetectorNoiseMaps file, including data and headers. - Data copying can be turned off if you only want to modify the headers - Headers should always be copied as we should modify them any time we make new edits to the data - - Args: - copy_data (bool): (optional) whether the data should be copied. Default is True - - Returns: - new_nm (corgidrp.data.DetectorNoiseMaps): a copy of this DetectorNoiseMaps - """ - if copy_data: - new_data = np.copy(self.data) - new_err = np.copy(self.err) - new_dq = np.copy(self.dq) - else: - new_data = self.data # this is just pointer referencing - new_err = self.err - new_dq = self.dq - new_nm = DetectorNoiseMaps(new_data, pri_hdr=self.pri_hdr.copy(), ext_hdr=self.ext_hdr.copy(), err = new_err, dq = new_dq, err_hdr = self.err_hdr.copy()) - - # annoying, but we got to manually update some parameters. Need to keep track of which ones to update - new_nm.filename = self.filename - new_nm.filedir = self.filedir - - # update DRP version tracking - self.ext_hdr['DRPVERSN'] = corgidrp.version - self.ext_hdr['DRPCTIME'] = time.Time.now().isot - - return new_nm - class DetectorParams(Image): """ Class containing detector parameters that may change over time. 
diff --git a/corgidrp/data/filter_curves/transmission_ID-01_1F_v0.csv b/corgidrp/data/filter_curves/transmission_ID-01_1F_v0.csv new file mode 100644 index 00000000..0b3b972a --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-01_1F_v0.csv @@ -0,0 +1,126 @@ +# Filter transmission curve produced by Materion. +#Truncated at T<1% due to measurement noise. +#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. +lambda_nm,%T +543.5,2.62223506 +544,7.046983242 +544.5,16.13804245 +545,31.6402874 +545.5,51.54069519 +546,72.01272583 +546.5,87.10562897 +547,93.03697205 +547.5,92.91194916 +548,91.69495392 +548.5,92.05001068 +549,93.96981812 +549.5,95.96953583 +550,96.82724762 +550.5,95.89777374 +551,93.58200836 +551.5,90.82911682 +552,89.20240784 +552.5,89.50035858 +553,91.77245331 +553.5,94.73375702 +554,96.91140747 +554.5,97.46907043 +555,96.67098999 +555.5,95.39193726 +556,94.60858154 +556.5,94.81651306 +557,95.95736694 +557.5,97.15691376 +558,97.69484711 +558.5,97.13855743 +559,95.89714813 +559.5,94.83798981 +560,94.58621979 +560.5,95.27588654 +561,96.56955719 +561.5,97.75156403 +562,98.31775665 +562.5,98.13916016 +563,97.43697357 +563.5,96.77692413 +564,96.50816345 +564.5,96.80419159 +565,97.41926575 +565.5,97.71026611 +566,97.06268311 +566.5,95.39664459 +567,93.243927 +567.5,91.69695282 +568,91.51848602 +568.5,92.79511261 +569,94.72873688 +569.5,96.70283508 +570,98.0776825 +570.5,98.77159882 +571,98.89006042 +571.5,98.67003632 +572,98.45578766 +572.5,98.40923309 +573,98.3951416 +573.5,98.15956116 +574,97.20458221 +574.5,95.2546463 +575,92.63168335 +575.5,90.20106506 +576,88.72252655 +576.5,88.90052795 +577,90.50936127 +577.5,93.09736633 +578,95.49654388 +578.5,97.18186188 +579,97.8999939 +579.5,97.96646118 +580,97.92380524 +580.5,97.92311859 +581,98.21533203 +581.5,98.57277679 +582,98.84672546 +582.5,98.90722656 +583,98.81912231 +583.5,98.35873413 +584,97.63378906 +584.5,96.69090271 
+585,95.74626923 +585.5,95.11701202 +586,95.18188477 +586.5,95.79512787 +587,96.62108612 +587.5,97.26956177 +588,97.47356415 +588.5,97.34996796 +589,97.287323 +589.5,97.48419952 +590,97.67388916 +590.5,97.20954895 +591,95.73603821 +591.5,93.51905823 +592,91.41559601 +592.5,90.4797287 +593,90.99110413 +593.5,92.51161194 +594,94.18556976 +594.5,95.41777039 +595,96.0429306 +595.5,96.33422089 +596,96.60414124 +596.5,96.88964844 +597,97.0973053 +597.5,96.75981903 +598,95.75063324 +598.5,94.19282532 +599,92.8374176 +599.5,92.18450165 +600,92.00694275 +600.5,90.4809494 +601,83.8256073 +601.5,69.58136749 +602,49.08872223 +602.5,27.68443489 +603,12.29771996 +603.5,4.186101437 +604,1.29500711 diff --git a/corgidrp/data/filter_curves/transmission_ID-02_2F_v0.csv b/corgidrp/data/filter_curves/transmission_ID-02_2F_v0.csv new file mode 100644 index 00000000..5d5e7ca8 --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-02_2F_v0.csv @@ -0,0 +1,238 @@ +# Filter transmission curve produced by Materion. +#Truncated at T<1% due to measurement noise. +#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. 
+lambda_nm,%T +601,1.3001729483352324 +601.5,3.386690123889491 +602,7.382010418293351 +602.5,14.143447378912114 +603,24.47849782223457 +603.5,37.60763624851235 +604,52.6092456880712 +604.5,67.66246276009122 +605,80.23156714506919 +605.5,89.205462697482 +606,94.03010681772564 +606.5,95.52692660102736 +607,94.88472386874645 +607.5,93.36145451407988 +608,91.93339904067327 +608.5,91.25706634026977 +609,91.3150857575214 +609.5,91.74774762740232 +610,91.95353617342089 +610.5,91.6544213534837 +611,90.82009428087063 +611.5,90.03100125983079 +612,89.7703342303015 +612.5,90.22675563200285 +613,91.26946959680629 +613.5,92.50005884467764 +614,93.5415118503144 +614.5,94.14578646142174 +615,94.32266464999611 +615.5,94.23483310694074 +616,94.17341293134776 +616.5,94.40520016455143 +617,95.00900860591135 +617.5,95.88234135208589 +618,96.84187020577428 +618.5,97.54262731010886 +619,97.84107725793542 +619.5,97.58261117986831 +620,96.86838091940989 +620.5,95.89573043758745 +621,95.01713987683206 +621.5,94.48594741793865 +622,94.5151099672162 +622.5,95.08491834774534 +623,96.02552518116649 +623.5,97.00800350170519 +624,97.78508302815108 +624.5,98.11751774122393 +625,97.9056078061507 +625.5,97.3771436007209 +626,96.73876959262634 +626.5,96.169190475579 +627,95.92881342445746 +627.5,96.03319792512362 +628,96.40203997560293 +628.5,96.8350915974096 +629,97.08959903398177 +629.5,97.01020445316738 +630,96.61910837908378 +630.5,95.94676490132736 +631,95.19437725621358 +631.5,94.63437378604915 +632,94.4347601085386 +632.5,94.7057746460185 +633,95.39682617614481 +633.5,96.39578867925644 +634,97.44055838262904 +634.5,98.28010506592047 +635,98.91782947745531 +635.5,99.17881747385577 +636,99.27607947093452 +636.5,99.26779534433692 +637,99.18593234467608 +637.5,99.18721623219427 +638,99.20454105087275 +638.5,99.205236484102 +639,99.1349743087746 +639.5,98.9189452337457 +640,98.64336068214718 +640.5,98.19792873215553 +641,97.76066626700383 +641.5,97.32273129697441 +642,97.08540347620443 
+642.5,97.12084025018001 +643,97.3819658066729 +643.5,97.86628879848512 +644,98.31856814189605 +644.5,98.69878939601084 +645,98.9117310167522 +645.5,98.86941628823872 +646,98.70412362841336 +646.5,98.47387247456122 +647,98.35603009332216 +647.5,98.33983631650177 +648,98.49007388414869 +648.5,98.79794664321479 +649,99.13404960204679 +649.5,99.43079534931074 +650,99.69519939963699 +650.5,99.84717938176573 +651,99.90514529953045 +651.5,99.90905045155522 +652,99.9297913241595 +652.5,99.90625341303699 +653,99.8872014654444 +653.5,99.86798902940784 +654,99.84721759568494 +654.5,99.79480754103744 +655,99.64524248828502 +655.5,99.35863027105755 +656,98.96031234721335 +656.5,98.42985352545163 +657,97.91098025247243 +657.5,97.55269205509074 +658,97.45719540084535 +658.5,97.67387406241717 +659,98.11975689658858 +659.5,98.70044773987037 +660,99.15507323761976 +660.5,99.41421182056459 +661,99.55784654384172 +661.5,99.5456267041007 +662,99.51456125189738 +662.5,99.4945846075937 +663,99.5501126676306 +663.5,99.56590140695752 +664,99.48607886029814 +664.5,99.30176482401554 +665,99.01914182943513 +665.5,98.70856373524018 +666,98.56666378366101 +666.5,98.59616259539419 +667,98.7884550569374 +667.5,99.08121159157105 +668,99.35107980162518 +668.5,99.5131703754221 +669,99.469434199298 +669.5,99.33047648911334 +670,99.16439669266934 +670.5,99.02578287770739 +671,99.06343588871282 +671.5,99.23549186131844 +672,99.47181091475507 +672.5,99.7081299681917 +673,99.89902391047579 +673.5,99.95397116902433 +674,99.99109686801158 +674.5,100 +675,99.88684228465401 +675.5,99.67318993509896 +676,99.27681311808301 +676.5,98.68679117696779 +677,97.98009611039082 +677.5,97.31589154633897 +678,96.80093871095224 +678.5,96.58514654218901 +679,96.63253568852132 +679.5,96.89523553828496 +680,97.24286289361196 +680.5,97.61924777073982 +681,97.87264709380543 +681.5,98.12999741558214 +682,98.35372215290292 +682.5,98.58253659361023 +683,98.87076895079477 +683.5,99.17582938577496 +684,99.4058589289459 
+684.5,99.57324553124288 +685,99.5442969698962 +685.5,99.32369023796481 +686,98.78045370332526 +686.5,98.08163771583568 +687,97.24208339977719 +687.5,96.42401887902236 +688,95.75649761723467 +688.5,95.38388033203917 +689,95.30031312179896 +689.5,95.46734054330557 +690,95.88172996944539 +690.5,96.40926947816413 +691,96.9745842623963 +691.5,97.51062186561002 +692,97.99827685074251 +692.5,98.44117159683312 +693,98.82422045058475 +693.5,99.10322869889293 +694,99.20733044660713 +694.5,99.17587524247803 +695,98.92816169983823 +695.5,98.44826352928514 +696,97.82995024613327 +696.5,97.10272829488291 +697,96.36951487188648 +697.5,95.75879026206947 +698,95.39436540008221 +698.5,95.15342283304 +699,95.05489987679456 +699.5,95.01100321222648 +700,94.96555518273918 +700.5,94.85655481125131 +701,94.7602251636842 +701.5,94.75974370836906 +702,94.97536009310389 +702.5,95.33645297178765 +703,95.84299177328494 +703.5,96.35861712356616 +704,96.81387691227404 +704.5,97.12206300544423 +705,97.32700327257344 +705.5,97.40993618183838 +706,97.47573532198743 +706.5,97.45698906571506 +707,97.29898705072165 +707.5,96.87577091041507 +708,95.90615437337648 +708.5,94.47820588892692 +709,92.65242857873216 +709.5,90.79612841391992 +710,89.35471441613036 +710.5,88.64135537292961 +711,88.73922873261535 +711.5,89.2588662238784 +712,89.49126483972256 +712.5,88.61816136704658 +713,85.59181327993123 +713.5,79.81629122612077 +714,71.000183720804 +714.5,59.15154560706325 +715,45.90814897468326 +715.5,32.432903976845594 +716,20.174933645999253 +716.5,11.063804657540823 +717,5.166467734836327 +717.5,1.9176504905695853 diff --git a/corgidrp/data/filter_curves/transmission_ID-03_3F_v0.csv b/corgidrp/data/filter_curves/transmission_ID-03_3F_v0.csv new file mode 100644 index 00000000..e6af616e --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-03_3F_v0.csv @@ -0,0 +1,259 @@ +# Filter transmission curve produced by Materion. +#Truncated at T<1% due to measurement noise. 
+#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. +lambda_nm,%T +666.5,1.074771643 +667,3.167236328 +667.5,9.418622017 +668,23.36450386 +668.5,45.54508591 +669,69.83283997 +669.5,87.22862244 +670,95.46642303 +670.5,97.4548111 +671,97.50283051 +671.5,97.08765411 +672,96.43386841 +672.5,95.95307922 +673,95.96282196 +673.5,96.51925659 +674,97.26030731 +674.5,97.78868103 +675,98.0076828 +675.5,98.41462708 +676,98.71331787 +676.5,98.80234528 +677,98.40351868 +677.5,97.58331299 +678,97.12963104 +678.5,97.03231049 +679,97.5760498 +679.5,98.290802 +680,98.65039825 +680.5,98.48930359 +681,97.89055634 +681.5,97.14070892 +682,97.09299469 +682.5,97.59227753 +683,97.92131805 +683.5,97.87980652 +684,97.22177124 +684.5,95.76123047 +685,94.38211823 +685.5,94.06650543 +686,94.57991028000001 +686.5,95.86216736 +687,97.35877228 +687.5,98.18286133 +688,98.18283844 +688.5,97.4990921 +689,97.09774017 +689.5,97.22727203 +690,97.84292603 +690.5,98.5025177 +691,98.21131134 +691.5,96.72081757 +692,94.52426147 +692.5,92.32913208 +693,91.31292725 +693.5,91.41236877 +694,92.62239075 +694.5,94.65657806 +695,96.22335815 +695.5,97.17272949 +696,97.38144684 +696.5,97.10403442 +697,96.63903809 +697.5,96.16997528 +698,95.41682434 +698.5,94.87506866 +699,93.55324554 +699.5,92.60647583 +700,92.19068909 +700.5,92.51579285 +701,93.79833221 +701.5,95.71259308 +702,97.1543045 +702.5,97.81784058 +703,97.00537872 +703.5,95.49311066 +704,93.62508392 +704.5,92.49376678 +705,92.27981567 +705.5,93.52000427 +706,95.21501923 +706.5,96.84896851 +707,98.34132385 +707.5,98.33589172 +708,97.41610718 +708.5,96.15274048 +709,94.93564606 +709.5,94.22542572 +710,94.40536499 +710.5,95.17276001 +711,96.41345978 +711.5,97.63631439 +712,98.3645401 +712.5,98.76622772 +713,98.83857727 +713.5,98.59922028 +714,98.56375885 +714.5,98.43694305 +715,98.69271851 +715.5,98.70114136 +716,99.1073761 +716.5,98.89421082 +717,98.67140961 +717.5,98.36181641 +718,98.36425781 
+718.5,98.38733673 +719,98.49157715 +719.5,98.43535614 +720,97.97961426 +720.5,97.68770599 +721,96.84619141 +721.5,96.08642578 +722,95.93739319 +722.5,96.02684021 +723,96.61627197 +723.5,97.46370697 +724,98.45738983 +724.5,98.51200867 +725,98.26969147 +725.5,97.54436493 +726,96.58948517 +726.5,95.65851593 +727,95.41732025 +727.5,95.78085327 +728,96.18370819 +728.5,97.00123596 +729,97.58006287 +729.5,97.37598419 +730,97.47728729 +730.5,97.32858276 +731,96.85647583 +731.5,97.16051483 +732,97.06370544 +732.5,97.30647278000001 +733,97.66939545 +733.5,97.2485199 +734,96.53100586 +734.5,95.99850464 +735,96.00070953 +735.5,95.89662933 +736,96.23082733 +736.5,96.72091675 +737,96.97782135 +737.5,96.98600006 +738,96.44591522 +738.5,94.98120117 +739,93.58158875 +739.5,92.59606171 +740,91.93250275 +740.5,92.33233643 +741,93.49472809 +741.5,94.59004211 +742,95.32366943 +742.5,96.2127533 +743,95.83169556 +743.5,95.08282471 +744,94.22571564 +744.5,93.51841736 +745,93.30796051 +745.5,94.07875061 +746,95.09494781 +746.5,95.99009705 +747,97.00227356 +747.5,97.90049744 +748,98.28401184 +748.5,97.61830139 +749,97.37159729 +749.5,97.2593689 +750,97.4930954 +750.5,97.70272064 +751,97.84299469 +751.5,97.85762024 +752,97.37683868 +752.5,96.52940369 +753,95.9841156 +753.5,95.73191833 +754,95.27902985 +754.5,95.44335938 +755,96.15307617 +755.5,97.00532532 +756,97.79270935 +756.5,98.50984955 +757,98.3453598 +757.5,98.2101593 +758,97.63716125 +758.5,97.39398193 +759,97.17584229 +759.5,96.91340637 +760,97.3819809 +760.5,97.78051758 +761,97.92136383 +761.5,97.9393692 +762,97.62387848 +762.5,97.22895813 +763,96.38275909 +763.5,95.73005676 +764,95.05382538 +764.5,94.78968048 +765,94.32489777 +765.5,93.96659851 +766,94.42705536 +766.5,94.83333588 +767,95.28948975 +767.5,95.68229675 +768,96.40258026 +768.5,96.86225891 +769,96.93479919 +769.5,97.25769043 +770,97.17481995 +770.5,96.39574432 +771,96.48906708 +771.5,96.15790558 +772,95.74002075 +772.5,96.11358643 +773,96.38831329 +773.5,97.16799164 
+774,98.29249573 +774.5,97.99868011 +775,98.31827545 +775.5,98.19773865 +776,97.26448059 +776.5,96.62248993 +777,96.83501434 +777.5,96.22483063 +778,95.96892548 +778.5,96.28366089 +779,96.56807709 +779.5,97.34089661 +780,97.06061554 +780.5,97.50209808 +781,97.19377136 +781.5,97.731987 +782,97.76856995 +782.5,97.84587096999999 +783,97.68737793 +783.5,97.36869812 +784,97.07810211 +784.5,96.85930634 +785,97.28977966 +785.5,97.59962463 +786,97.63251495 +786.5,97.97173309 +787,97.90291595 +787.5,97.70092773 +788,97.30383301 +788.5,97.70261383 +789,96.56854248 +789.5,93.95090485 +790,85.30145264 +790.5,67.80437469 +791,43.88753891 +791.5,22.9462471 +792,10.36684227 +792.5,4.11921978 +793,1.691660881 +793.5,1.166493893 diff --git a/corgidrp/data/filter_curves/transmission_ID-04_4F_v0.csv b/corgidrp/data/filter_curves/transmission_ID-04_4F_v0.csv new file mode 100644 index 00000000..a1be2103 --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-04_4F_v0.csv @@ -0,0 +1,208 @@ +# Filter transmission curve produced by Materion. +#Truncated at T<1% due to measurement noise. +#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. 
+lambda_nm,%T +774.5,2.607397318 +775,6.119506836 +775.5,12.29881001 +776,21.22951508 +776.5,32.43777084 +777,46.04741669 +777.5,60.5358429 +778,74.46871185 +778.5,86.31536865 +779,93.76290131 +779.5,97.21960449 +780,98.1055603 +780.5,98.16951752 +781,97.9030304 +781.5,97.70967865 +782,97.68265533 +782.5,97.79682922 +783,98.03553009 +783.5,98.31847382 +784,98.6043396 +784.5,98.87443542 +785,99.02718353 +785.5,99.18736267 +786,99.26405334 +786.5,99.20742798 +787,99.14316559 +787.5,98.94221497 +788,98.65193939 +788.5,98.55516052 +789,98.4324646 +789.5,98.27626038 +790,98.33323669 +790.5,98.40023804 +791,98.5348587 +791.5,98.67603302 +792,98.91829681 +792.5,99.10044861 +793,99.17909241 +793.5,99.26438141 +794,99.34832764 +794.5,99.31990051 +795,99.27350616 +795.5,99.26464081 +796,99.24629211 +796.5,99.10198212 +797,98.99059296 +797.5,98.85362244 +798,98.67990875 +798.5,98.63317871 +799,98.70613098 +799.5,98.72871399 +800,98.85072327 +800.5,98.92288208 +801,99.03604126 +801.5,99.20272064 +802,99.20328522 +802.5,99.17562866 +803,99.16133881 +803.5,99.24816895 +804,99.17129517 +804.5,99.10821533 +805,99.05590057 +805.5,98.95515442 +806,98.60359192 +806.5,98.35285187 +807,97.93200684 +807.5,97.71168518 +808,97.57345581 +808.5,97.64571381 +809,97.75281525 +809.5,98.09890747 +810,98.38904572 +810.5,98.56027222 +811,98.7377243 +811.5,98.72770691 +812,98.56394196 +812.5,98.46011353 +813,98.34515381 +813.5,98.29756927 +814,98.27790833 +814.5,98.25579071 +815,98.30797577 +815.5,98.3145752 +816,98.15906525 +816.5,98.05577087 +817,97.84544373 +817.5,97.71379089 +818,97.63305664 +818.5,97.63643646 +819,97.71598816 +819.5,97.62771606 +820,97.70117188 +820.5,97.74869537 +821,97.9016037 +821.5,97.90232849 +822,98.03433228 +822.5,98.12084961 +823,98.08516693 +823.5,98.11482239 +824,98.11250305 +824.5,98.03469086 +825,97.94036102 +825.5,98.0965271 +826,98.27873993 +826.5,98.46389008 +827,98.69721985 +827.5,98.85894012 +828,98.93021393 +828.5,98.89567566 +829,98.63631439 
+829.5,98.49500275 +830,98.43567657 +830.5,98.29528809 +831,98.02468109 +831.5,97.80431366 +832,97.33279419 +832.5,96.59515381 +833,95.84128571 +833.5,94.99229431 +834,94.33330536 +834.5,94.02957916 +835,94.16448212 +835.5,94.6627655 +836,95.39266205 +836.5,96.19602203 +837,97.05699921 +837.5,97.79457092 +838,98.29811096 +838.5,98.43067169 +839,98.3783493 +839.5,98.31827545 +840,98.05306244 +840.5,97.87277222 +841,97.73587036 +841.5,97.70256042 +842,97.73091125 +842.5,98.06996918 +843,98.33365631 +843.5,98.55279541 +844,98.77254486 +844.5,98.78225708 +845,98.85475922 +845.5,98.68682861 +846,98.35428619 +846.5,98.2307663 +847,98.06269836 +847.5,98.02948761 +848,98.01639557 +848.5,98.34991455 +849,98.62162018 +849.5,98.81937408 +850,98.93099213 +850.5,99.03549194 +851,98.93656158 +851.5,98.81784821 +852,98.66210175 +852.5,98.34527588 +853,98.11716461 +853.5,97.81734467 +854,97.65375519 +854.5,97.69297028 +855,97.68740845 +855.5,97.98963928 +856,98.20220947 +856.5,98.46676636 +857,98.51070404 +857.5,98.62549591 +858,98.43197632 +858.5,98.22462463 +859,97.92352295 +859.5,97.40486145 +860,97.13057709 +860.5,96.77129364 +861,96.77594757 +861.5,96.79536438 +862,97.08629608 +862.5,97.19421387 +863,96.92913818 +863.5,96.76654053 +864,96.02629852 +864.5,95.32683563 +865,94.68447876 +865.5,94.47962952 +866,94.56920624 +866.5,95.06407928 +867,95.39228821 +867.5,95.73034668 +868,95.45861816 +868.5,95.16710663 +869,94.95645142 +869.5,95.12712096999999 +870,95.11745453 +870.5,95.14229584 +871,94.99565125 +871.5,93.28001404 +872,90.44902039 +872.5,85.44277954 +873,76.62340546 +873.5,63.92355728 +874,48.34553528 +874.5,32.72177124 +875,18.56053543 +875.5,8.367739677 +876,2.870337009 diff --git a/corgidrp/data/filter_curves/transmission_ID-05_1A_v0.csv b/corgidrp/data/filter_curves/transmission_ID-05_1A_v0.csv new file mode 100644 index 00000000..a368566b --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-05_1A_v0.csv @@ -0,0 +1,51 @@ +# Filter transmission curve 
produced by Materion. +#Truncated at T<1% due to measurement noise. +#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. +lambda_nm,%T +543,1.148140907 +543.5,3.164474249 +544,7.365199089 +544.5,15.11584377 +545,26.38548851 +545.5,40.78945923 +546,57.00786209 +546.5,71.64466858 +547,82.86642456 +547.5,89.76339722 +548,92.97943115 +548.5,94.11235046 +549,94.53853607 +549.5,94.97335815 +550,95.40241241 +550.5,95.5278244 +551,95.27069855 +551.5,94.67111969 +552,94.10951996 +552.5,93.82350159 +553,93.96655273 +553.5,94.22510529 +554,94.30596924 +554.5,93.95028687 +555,93.22734833 +555.5,92.43160248 +556,91.85984802 +556.5,91.59857178 +557,91.46416473 +557.5,91.15903473 +558,90.62069702 +558.5,89.95900726 +559,89.48021698 +559.5,89.27760315 +560,89.16210175 +560.5,88.74117279 +561,87.78595734 +561.5,85.9458313 +562,82.51617432 +562.5,76.75866699 +563,67.63407898 +563.5,54.81229401 +564,40.4200058 +564.5,26.35173798 +565,14.67542362 +565.5,6.932487488 +566,2.652906179 diff --git a/corgidrp/data/filter_curves/transmission_ID-06_1B_v0.csv b/corgidrp/data/filter_curves/transmission_ID-06_1B_v0.csv new file mode 100644 index 00000000..15311dd5 --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-06_1B_v0.csv @@ -0,0 +1,48 @@ +# Filter transmission curve produced by Materion. +#Truncated at T<1% due to measurement noise. +#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. 
+lambda_nm,%T +563.5,1.451493979 +564,4.515580654 +564.5,11.76853561 +565,25.17383194 +565.5,45.41886902 +566,67.71577454 +566.5,84.98130035 +567,92.03194427 +567.5,92.51402283 +568,91.65807343 +568.5,91.73574829 +569,92.72997284 +569.5,94.11154938 +570,95.14994049 +570.5,95.72999573 +571,95.99905396 +571.5,95.8134613 +572,95.31806946 +572.5,94.67775726 +573,94.37583923 +573.5,94.60755157 +574,95.20894623 +574.5,95.70319366 +575,95.7898941 +575.5,95.45360565 +576,95.04876709 +576.5,94.85934448 +577,94.85010529 +577.5,94.80921936 +578,94.26318359 +578.5,93.04077148 +579,91.59568787 +579.5,90.74327087 +580,90.9562149 +580.5,91.92352295 +581,92.78236389 +581.5,92.55296326 +582,90.2035141 +582.5,83.81760406 +583,70.45198059 +583.5,50.35218048 +584,27.84416008 +584.5,10.93383026 +585,3.046211004 diff --git a/corgidrp/data/filter_curves/transmission_ID-07_1C_v0.csv b/corgidrp/data/filter_curves/transmission_ID-07_1C_v0.csv new file mode 100644 index 00000000..c40b33de --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-07_1C_v0.csv @@ -0,0 +1,49 @@ +# Filter transmission curve produced by Materion. +#Truncated at T<1% due to measurement noise. +#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. 
+lambda_nm,%T +583.5,1.561838031 +584,5.457098484 +584.5,16.34264946 +585,40.28140259 +585.5,74.82595825 +586,93.52374268 +586.5,94.68582916 +587,93.90279388 +587.5,92.87062836 +588,92.6352005 +588.5,94.1960144 +589,96.23678589 +589.5,97.15402222 +590,96.92851257 +590.5,95.53081512 +591,93.58590698 +591.5,92.25596619 +592,92.45136261 +592.5,93.88538361 +593,95.20861816 +593.5,95.76273346 +594,95.31964111 +594.5,93.58731079 +595,90.53212738 +595.5,87.55470276 +596,86.60009766 +596.5,88.40273285 +597,91.7831192 +597.5,94.45657349 +598,94.8568573 +598.5,93.220047 +599,91.03125 +599.5,89.73204803 +600,89.68844604 +600.5,90.78850555 +601,91.87247467 +601.5,91.84210968 +602,90.63880157 +602.5,89.06274414 +603,86.11582947 +603.5,77.70343781 +604,55.62945175 +604.5,23.51212692 +605,6.581247807 +605.5,1.783726215 diff --git a/corgidrp/data/filter_curves/transmission_ID-08_2A_v0.csv b/corgidrp/data/filter_curves/transmission_ID-08_2A_v0.csv new file mode 100644 index 00000000..cb2fa12b --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-08_2A_v0.csv @@ -0,0 +1,55 @@ +# Filter transmission curve produced by Materion. +#Truncated at T<1% due to measurement noise. +#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. 
+lambda_nm,%T +602,4.636655807 +602.5,18.44475937 +603,45.6701355 +603.5,75.24854279 +604,92.82148743 +604.5,97.49591064 +605,97.62587738 +605.5,96.6824646 +606,95.59482574 +606.5,95.42743683 +607,96.27185822 +607.5,97.14804077 +608,97.52832031 +608.5,98.03383636 +609,98.03119659 +609.5,98.21874237 +610,98.08547211 +610.5,98.17147827 +611,98.26211548 +611.5,97.99932098 +612,97.80449677 +612.5,98.08492279 +613,96.95513153 +613.5,96.17997742 +614,94.90947723 +614.5,95.20867157 +615,95.95687103 +615.5,97.11621094 +616,97.18592072 +616.5,96.20800781 +617,94.65257263 +617.5,94.21311951 +618,94.76306152 +618.5,96.14246368 +619,97.43959045 +619.5,98.14369202 +620,97.23971558 +620.5,96.76680756 +621,96.37485504 +621.5,97.25893402 +622,97.24951172 +622.5,96.31407928 +623,95.02629852 +623.5,93.1809845 +624,81.7776947 +624.5,56.93593597 +625,29.34465027 +625.5,12.46787262 +626,5.478643417 +626.5,2.291638136 +627,1.327066422 diff --git a/corgidrp/data/filter_curves/transmission_ID-09_2B_v0.csv b/corgidrp/data/filter_curves/transmission_ID-09_2B_v0.csv new file mode 100644 index 00000000..f70a7cfb --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-09_2B_v0.csv @@ -0,0 +1,42 @@ +# Filter transmission curve produced by Materion. +#Truncated at T<1% due to measurement noise. +#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. 
+lambda_nm,%T +630,2.553656578 +630.5,8.19055748 +631,19.72608948 +631.5,38.37952423 +632,59.34857178 +632.5,77.00325775 +633,88.63591003 +633.5,93.0453186 +634,92.14408875 +634.5,88.56638336 +635,85.49575043 +635.5,84.28400421 +636,84.51153564 +636.5,85.26911163 +637,87.07287598 +637.5,90.13430023 +638,93.22301483 +638.5,94.39846802 +639,93.27830505 +639.5,90.47745514 +640,86.89321136 +640.5,84.13012695 +641,83.66394806 +641.5,85.64718628 +642,88.91687775 +642.5,92.02707672 +643,94.17041779 +643.5,95.04003906 +644,94.95652008 +644.5,94.36031342 +645,92.5362854 +645.5,87.32347107 +646,76.77319336 +646.5,60.79568863 +647,41.72370911 +647.5,22.73674202 +648,8.722807884 +648.5,2.175623178 diff --git a/corgidrp/data/filter_curves/transmission_ID-10_2C_v0.csv b/corgidrp/data/filter_curves/transmission_ID-10_2C_v0.csv new file mode 100644 index 00000000..3816980e --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-10_2C_v0.csv @@ -0,0 +1,23 @@ +# Filter transmission curve produced by Materion. +#Truncated at T<1% due to measurement noise. +#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. +lambda_nm,%T +651.5,4.113111973 +652,12.86722851 +652.5,26.67666435 +653,43.87610245 +653.5,62.79453659 +654,78.4229126 +654.5,87.15448761 +655,90.04803467 +655.5,90.97179413 +656,91.26333618 +656.5,90.92850494 +657,89.92632294 +657.5,87.68118286 +658,81.8968811 +658.5,70.61357117 +659,54.88114929 +659.5,36.73434448 +660,18.66839027 +660.5,6.087434769 diff --git a/corgidrp/data/filter_curves/transmission_ID-11_3A_v0.csv b/corgidrp/data/filter_curves/transmission_ID-11_3A_v0.csv new file mode 100644 index 00000000..ce4461ff --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-11_3A_v0.csv @@ -0,0 +1,64 @@ +# Filter transmission curve produced by Materion. +#Truncated at T<1% due to measurement noise. 
+#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. +lambda_nm,%T +665.5,1.134022355 +666,3.027052641 +666.5,7.560984612 +667,16.21899223 +667.5,29.89443016 +668,46.38880157 +668.5,63.80021667 +669,79.69788361 +669.5,90.42789459 +670,95.97458649 +670.5,97.93623352 +671,98.5001297 +671.5,98.79405212 +672,99.01778412 +672.5,99.14266205 +673,99.12802887 +673.5,99.03844452 +674,98.95560455 +674.5,98.72371674 +675,98.53421783 +675.5,98.23595428 +676,97.87637329 +676.5,97.49089813 +677,97.24849701 +677.5,97.21984863 +678,97.39849091 +678.5,97.6439743 +679,97.81321716 +679.5,97.78489685 +680,97.55740356 +680.5,97.41259003 +681,97.40080261 +681.5,97.60211182 +682,97.93234253 +682.5,98.24222565 +683,98.38591003 +683.5,98.33356476 +684,98.1483078 +684.5,97.78238678 +685,97.47267151 +685.5,97.15261841 +686,96.98886871 +686.5,97.0112915 +687,97.21164703 +687.5,97.3359375 +688,97.24887085 +688.5,96.73415375 +689,96.3131485 +689.5,96.21191406 +690,96.43402863 +690.5,96.80392456 +691,96.23623657 +691.5,92.87851715 +692,84.25907135 +692.5,68.98257446 +693,48.39720917 +693.5,28.64116669 +694,13.55405903 +694.5,5.081246853 +695,1.736072421 diff --git a/corgidrp/data/filter_curves/transmission_ID-12_3B_v0.csv b/corgidrp/data/filter_curves/transmission_ID-12_3B_v0.csv new file mode 100644 index 00000000..3357eaf2 --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-12_3B_v0.csv @@ -0,0 +1,60 @@ +# Filter transmission curve produced by Materion. +#Truncated at T<1% due to measurement noise. +#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. 
+lambda_nm,%T +688.5,1.946741581 +689,5.350462914 +689.5,12.53374386 +690,24.29236221 +690.5,39.639328 +691,57.11380386 +691.5,73.82277679 +692,85.60559845 +692.5,91.84492493 +693,94.04990387 +693.5,94.42072296 +694,94.35018921 +694.5,94.35779572 +695,94.6394043 +695.5,95.1210022 +696,95.76455688 +696.5,96.58153534 +697,97.3102417 +697.5,97.84294128 +698,97.95516968 +698.5,97.83664703 +699,97.59008789 +699.5,97.38980865 +700,97.29090881 +700.5,97.38381958 +701,97.67137146 +701.5,97.94834137 +702,98.11876678 +702.5,97.9318161 +703,97.42292023 +703.5,96.77307892 +704,96.20483398 +704.5,95.9181366 +705,95.98274994 +705.5,96.45253754 +706,97.0042038 +706.5,97.55940247 +707,97.92317963 +707.5,98.04290771 +708,97.94683075 +708.5,97.6178894 +709,97.1673584 +709.5,96.65142059 +710,96.3724823 +710.5,96.27614594 +711,95.8735199 +711.5,94.50344849 +712,91.33155823 +712.5,84.94326019 +713,73.58865356 +713.5,57.71269226 +714,39.10775375 +714.5,21.52568245 +715,9.603467941 +715.5,3.545237064 +716,1.264983535 diff --git a/corgidrp/data/filter_curves/transmission_ID-14_3D_v0.csv b/corgidrp/data/filter_curves/transmission_ID-14_3D_v0.csv new file mode 100644 index 00000000..86f5a4a4 --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-14_3D_v0.csv @@ -0,0 +1,28 @@ +# Filter transmission curve produced by Materion. +#Truncated at T<1% due to measurement noise. +#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. 
+lambda_nm,%T +747.5,2.082982779 +748,6.220796585 +748.5,13.98272705 +749,25.13321304 +749.5,38.15190887 +750,52.07556915 +750.5,65.96637726 +751,78.72642517 +751.5,88.97520447 +752,94.90507507 +752.5,97.04955292 +753,97.49756622 +753.5,97.547966 +754,97.27757263 +754.5,96.06228638 +755,93.07421112 +755.5,87.17158508 +756,77.40446471999999 +756.5,64.99837494 +757,51.04321671 +757.5,36.78225708 +758,23.18589783 +758.5,11.5986948 +759,4.058616161 diff --git a/corgidrp/data/filter_curves/transmission_ID-15_3E_v0.csv b/corgidrp/data/filter_curves/transmission_ID-15_3E_v0.csv new file mode 100644 index 00000000..8ac2039f --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-15_3E_v0.csv @@ -0,0 +1,68 @@ +# Filter transmission curve produced by Materion. +#Truncated at T<1% due to measurement noise. +#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. +lambda_nm,%T +761,2.356164455 +761.5,6.102582932 +762,12.89969349 +762.5,22.58228874 +763,35.16889954 +763.5,49.91805649 +764,65.02502441 +764.5,78.116539 +765,86.63567352 +765.5,90.95330811 +766,92.7857132 +766.5,94.0993042 +767,95.08213043 +767.5,95.8321991 +768,96.0512085 +768.5,95.70610809 +769,94.94276428 +769.5,94.02412415 +770,93.29965973 +770.5,92.91426849 +771,92.78328705 +771.5,93.03421021 +772,93.47348785 +772.5,94.21273804 +773,95.0075531 +773.5,95.58717346 +774,95.59059143 +774.5,94.84735107 +775,93.72834778000001 +775.5,92.77915192 +776,92.27439117 +776.5,92.52194214 +777,93.43658447 +777.5,94.39068604 +778,95.19042206 +778.5,95.69364929 +779,95.81039429 +779.5,95.61376953 +780,95.1807785 +780.5,94.71299744 +781,94.05731201 +781.5,93.27639008 +782,92.81617737 +782.5,92.99130249 +783,93.38832855 +783.5,94.04758453 +784,94.16904449 +784.5,93.63576508 +785,92.43721008 +785.5,91.27432251 +786,90.8195343 +786.5,91.18838501 +787,92.0947113 +787.5,92.65519714 +788,92.28235626 +788.5,90.53547668 +789,87.04727173 +789.5,80.42462158 
+790,68.45227814 +790.5,50.90424347 +791,31.63632965 +791.5,14.98308945 +792,5.034407616 +792.5,1.26908803 diff --git a/corgidrp/data/filter_curves/transmission_ID-16_4A_v0.csv b/corgidrp/data/filter_curves/transmission_ID-16_4A_v0.csv new file mode 100644 index 00000000..d62f3084 --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-16_4A_v0.csv @@ -0,0 +1,74 @@ +# Filter transmission curve produced by Materion. +#Truncated at T<1% due to measurement noise. +#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. +lambda_nm,%T +774,1.503393054 +774.5,3.779611826 +775,8.309972763 +775.5,15.65544415 +776,25.70594215 +776.5,37.66309738 +777,51.50606537 +777.5,65.68690491 +778,78.5421524 +778.5,88.55155945 +779,94.18572235 +779.5,96.49324036 +780,97.04647064 +780.5,97.10809326 +781,97.26959991 +781.5,97.21054077 +782,97.16724396 +782.5,97.19725037 +783,97.15886688 +783.5,97.07201385 +784,96.92201233 +784.5,96.7592392 +785,96.54304504 +785.5,96.44905853 +786,96.46020508 +786.5,96.50248718 +787,96.73768616 +787.5,97.0883255 +788,97.42559814 +788.5,97.68354034 +789,97.79048157 +789.5,97.73381042 +790,97.48413849 +790.5,97.00508118 +791,96.44545746 +791.5,95.81676483 +792,95.3844223 +792.5,95.07421875 +793,94.90450287 +793.5,94.97158051 +794,95.10639954 +794.5,95.36252594 +795,95.45411682 +795.5,95.37888336 +796,95.01003265 +796.5,94.66210938 +797,94.37471008 +797.5,94.11423492 +798,94.25786591 +798.5,94.52485657 +799,94.89575958 +799.5,95.07430267 +800,94.94374847 +800.5,94.28076935 +801,93.53063965 +801.5,92.86895752 +802,92.53860474 +802.5,92.74726868 +803,93.31552124 +803.5,93.34109497 +804,92.45952606 +804.5,90.18874359 +805,86.55696106 +805.5,80.10902405 +806,69.63167572 +806.5,54.95596695 +807,38.22042465 +807.5,22.58443069 +808,10.41745663 +808.5,3.631387949 diff --git a/corgidrp/data/filter_curves/transmission_ID-17_4B_v0.csv b/corgidrp/data/filter_curves/transmission_ID-17_4B_v0.csv new file 
mode 100644 index 00000000..df57ed59 --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-17_4B_v0.csv @@ -0,0 +1,71 @@ +# Filter transmission curve produced by Materion. +#Truncated at T<1% due to measurement noise. +#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. +lambda_nm,%T +807,1.204182625 +807.5,3.162686825 +808,7.343564034 +808.5,14.45257187 +809,24.60978889 +809.5,37.52537155 +810,52.61500549 +810.5,67.94036102 +811,80.70885468 +811.5,88.80978394 +812,92.34494019 +812.5,93.37674713 +813,94.07345581 +813.5,94.54816437 +814,94.99471283 +814.5,95.1005249 +815,94.73417664 +815.5,94.10783386 +816,93.57420349 +816.5,93.02722931 +817,92.77297974 +817.5,93.06060028 +818,93.31785583 +818.5,93.69159698 +819,94.25982666 +819.5,94.65058136 +820,95.08589172 +820.5,95.60163879 +821,95.60066986 +821.5,95.23110199 +822,94.70072937 +822.5,93.72721863 +823,92.96554565 +823.5,92.28098297 +824,92.15089417 +824.5,92.54603577 +825,93.4567337 +825.5,94.09523773 +826,95.0124054 +826.5,95.68981171 +827,95.94650269 +827.5,95.79462433 +828,95.30389404 +828.5,94.60430908 +829,93.90944672 +829.5,93.71763611 +830,93.37611389 +830.5,93.89476013 +831,94.08364105 +831.5,94.21252441 +832,93.97090912 +832.5,93.44459534 +833,92.98474884 +833.5,92.70753479 +834,92.84369659 +834.5,93.07475281 +835,93.07562256 +835.5,92.08370209 +836,89.60687256 +836.5,84.52487183 +837,75.20609283 +837.5,60.84946823 +838,43.11647797 +838.5,25.54709625 +839,11.75564098 +839.5,4.041862488 +840,1.16523838 diff --git a/corgidrp/data/filter_curves/transmission_ID-18_4C_v0.csv b/corgidrp/data/filter_curves/transmission_ID-18_4C_v0.csv new file mode 100644 index 00000000..c52a80f8 --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-18_4C_v0.csv @@ -0,0 +1,75 @@ +# Filter transmission curve produced by Materion. +#Truncated at T<1% due to measurement noise. 
+#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. +lambda_nm,%T +838.5,2.281103373 +839,5.318284988 +839.5,10.84360409 +840,18.93813515 +840.5,29.8079319 +841,43.190513609999996 +841.5,57.94519806 +842,72.30450439 +842.5,83.36347961 +843,89.83574677 +843.5,92.38195801 +844,93.4364624 +844.5,93.9568634 +845,94.56826782 +845.5,94.81633759 +846,94.91538239 +846.5,94.56040955 +847,93.67848969 +847.5,92.65696716 +848,91.74810028 +848.5,90.90318298 +849,90.3300705 +849.5,90.33866119 +850,91.06642151 +850.5,92.27986145 +851,93.69274902 +851.5,94.80098724 +852,95.55825043 +852.5,95.9524765 +853,95.79483032 +853.5,95.19686127 +854,93.69874573 +854.5,92.26284027 +855,90.83835602 +855.5,89.50224304 +856,88.82529449 +856.5,89.07236481 +857,89.94302368 +857.5,90.75970459 +858,91.42999268 +858.5,91.76212311 +859,91.50287628 +859.5,91.2424469 +860,91.27848816 +860.5,91.71116638 +861,92.62174225 +861.5,93.36694336 +862,94.41060638 +862.5,94.79428864 +863,94.81108856 +863.5,94.24248505 +864,92.94239044 +864.5,91.71135712 +865,90.43691254 +865.5,89.32411957 +866,89.05238342 +866.5,89.22154999 +867,89.72057343 +867.5,89.97161865 +868,89.86342621 +868.5,89.53034973 +869,88.51000214 +869.5,86.52971649 +870,82.41040039 +870.5,74.24715424 +871,60.69789505 +871.5,44.2693634 +872,26.7557354 +872.5,12.78135014 +873,4.538541794 +873.5,1.315397739 diff --git a/corgidrp/data/filter_curves/transmission_ID-21_3C_v0.csv b/corgidrp/data/filter_curves/transmission_ID-21_3C_v0.csv new file mode 100644 index 00000000..accef367 --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-21_3C_v0.csv @@ -0,0 +1,55 @@ +# Filter transmission curve produced by Materion. +#Truncated at T<1% due to measurement noise. +#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. 
+lambda_nm,%T +713,1.47617805 +713.5,3.800706148 +714,8.461511612 +714.5,15.99529362 +715,26.27761459 +715.5,39.15308762 +716,53.45459747 +716.5,68.07023621 +717,80.80206299 +717.5,89.79665375 +718,94.92982483 +718.5,96.85245514 +719,97.3427887 +719.5,97.51346588 +720,97.77384949 +720.5,98.13366699 +721,98.47615051 +721.5,98.74281311 +722,98.84688568 +722.5,98.83795166 +723,98.7329483 +723.5,98.51490784 +724,98.29811859 +724.5,98.11035156 +725,97.94398499 +725.5,97.75770569 +726,97.49851227 +726.5,97.2520752 +727,96.88398743 +727.5,96.65181732 +728,96.62328339 +728.5,96.77813721 +729,97.02037811 +729.5,97.28778839 +730,97.53518677 +730.5,97.68135834 +731,97.77599335 +731.5,97.74250031 +732,97.49845123 +732.5,97.13707733 +733,96.41797638 +733.5,95.20082855 +734,92.67784119 +734.5,87.44489288 +735,77.77334595 +735.5,62.90353012 +736,45.57038879 +736.5,28.51318932 +737,14.28209877 +737.5,5.899062634 +738,2.08187151 diff --git a/corgidrp/data/filter_curves/transmission_ID-22_3G_v0.csv b/corgidrp/data/filter_curves/transmission_ID-22_3G_v0.csv new file mode 100644 index 00000000..5ecf8e77 --- /dev/null +++ b/corgidrp/data/filter_curves/transmission_ID-22_3G_v0.csv @@ -0,0 +1,63 @@ +# Filter transmission curve produced by Materion. +#Truncated at T<1% due to measurement noise. +#True CGI curve may differ slightly (TBD) due to difference in f/number in Materion test setup and vacuum vs. air. 
+lambda_nm,%T +737.5,1.878973484 +738,4.320493698 +738.5,8.791771889 +739,15.50291634 +739.5,24.63331795 +740,36.67280197 +740.5,49.93404007 +741,63.84884644 +741.5,77.20352173 +742,86.7252655 +742.5,92.26239014 +743,94.60433197 +743.5,95.33828735 +744,95.35644531 +744.5,94.82652283 +745,93.70857239 +745.5,92.05451202 +746,90.39134216 +746.5,89.09729767 +747,88.72369385 +747.5,89.42905426 +748,90.93803406 +748.5,92.62509918 +749,94.11195374 +749.5,95.42094421 +750,96.17150116 +750.5,96.28593445 +751,95.47412109 +751.5,94.05625916 +752,92.43273163 +752.5,91.48441315 +753,91.69833374 +753.5,93.05688477 +754,94.55892181 +754.5,95.50753784 +755,95.5157547 +755.5,94.72205353 +756,93.7824707 +756.5,93.04434967 +757,92.60745239 +757.5,92.17529297 +758,91.8886261 +758.5,91.84381104 +759,92.21255493 +759.5,92.73954773 +760,93.11281586 +760.5,93.15596771 +761,92.77539063 +761.5,92.40364075 +762,91.91220093 +762.5,91.22557068 +763,88.26169586 +763.5,81.0640564 +764,67.62430573 +764.5,49.36477661 +765,29.80702209 +765.5,13.75210094 +766,5.071521759 +766.5,1.623491526 diff --git a/corgidrp/fluxcal.py b/corgidrp/fluxcal.py new file mode 100644 index 00000000..ff2b51e2 --- /dev/null +++ b/corgidrp/fluxcal.py @@ -0,0 +1,241 @@ +# This module is written to do an absolute flux calibration observing a standard star having CALSPEC data. 
+import glob +import os +import numpy as np +from astropy.io import fits, ascii +from scipy import integrate +import urllib +import corgidrp + +# Dictionary of anticipated bright and dim CALSPEC standard star names and corresponding fits names +calspec_names= { +# bright standards +'109 Vir': '109vir_stis_005.fits', +'Vega': 'alpha_lyr_stis_011.fits', +'Eta Uma': 'etauma_stis_008.fits', +'Lam Lep': 'lamlep_stis_008.fits', +'KSI2 CETI': 'ksi2ceti_stis_008.fits', +# dim standards +'TYC 4433-1800-1': '1808347_stiswfc_006.fits', +'TYC 4205-1677-1': '1812095_stisnic_008.fits', +'TYC 4212-455-1': '1757132_stiswfc_006.fits', +'TYC 4209-1396-1': '1805292_stisnic_008.fits', +'TYC 4413-304-1': 'p041c_stisnic_010.fits', +'UCAC3 313-62260': 'kf08t3_stisnic_005.fits', +'BPS BS 17447-0067': '1802271_stiswfcnic_006.fits', +'TYC 4424-1286-1': '1732526_stisnic_009.fits', +'GSC 02581-02323': 'p330e_stiswfcnic_007.fits', +'TYC 4207-219-1': '1740346_stisnic_005.fits' +} + +calspec_url = 'https://archive.stsci.edu/hlsps/reference-atlases/cdbs/current_calspec/' + +def get_calspec_file(star_name): + """ + download the corresponding CALSPEC fits file and return the file path + + Args: + star_name (str): + + Returns: + str: file path + """ + if star_name not in calspec_names: + raise ValueError('{0} is not in list of anticipated standard stars {1}, please check naming'.format(star_name, calspec_names.keys()) ) + fits_name = calspec_names.get(star_name) + # TODO: be flexible with the version of the calspec fits file, so essentially, the number in the name should not matter + fits_url = calspec_url + fits_name + try: + calspec_dir = os.path.join(os.path.dirname(corgidrp.config_filepath), "calspec_data") + if not os.path.exists(calspec_dir): + os.mkdir(calspec_dir) + file_name, headers = urllib.request.urlretrieve(fits_url, filename = os.path.join(calspec_dir, fits_name)) + except: + raise Exception("cannot access CALSPEC archive web page and/or download {0}".format(fits_name)) + return
file_name + +def get_filter_name(dataset): + """ + return the name of the transmission curve csv file of used color filter + + Args: + dataset (corgidrp.Dataset): dataset of the observed calstar + + Returns: + str: filepath of the selected filter curve + """ + datadir = os.path.join(os.path.dirname(__file__), "data", "filter_curves") + filters = os.path.join(datadir, "*.csv") + filter = dataset[0].ext_hdr['CFAMNAME'] + filter_names = os.listdir(datadir) + + filter_name = [name for name in filter_names if filter in name] + if filter_name == []: + raise ValueError("there is no filter available with name {0}".format(filter)) + else: + return os.path.join(datadir,filter_name[0]) + +def read_filter_curve(filter_filename): + """ + read the transmission curve csv file of the color filters + + Args: + filter_filename (str): file name of the transmission curve data + + Returns: + lambda_nm (np.array): wavelength in unit Angstroem + transmission (np.array): transmission of the filter < 1 + """ + tab = ascii.read(filter_filename, format='csv', header_start = 3, data_start = 4) + lambda_nm = tab['lambda_nm'].data #unit nm + transmission = tab['%T'].data + return lambda_nm * 10 , transmission/100. 
+ +def read_cal_spec(calspec_filename, filter_wavelength): + """ + read the calspec flux density data interpolated on the wavelength grid of the transmission curve + + Args: + calspec_filename (str): file name of the CALSPEC fits file + filter_wavelength (np.array): wavelength grid of the transmission curve in unit Angstroem + + Returns: + np.array: flux density in Jy interpolated on the wavelength grid of the transmission curve + in CALSPEC units erg/(s * cm^2 * AA) + """ + hdulist = fits.open(calspec_filename) + data = hdulist[1].data + hdulist.close() + w = data['WAVELENGTH'] #wavelength in Angstroem + flux = data['FLUX'] + flux = flux[(w<=filter_wavelength[-1]) & (w>=filter_wavelength[0])] #erg/(s*cm^2*AA) + w = w[(w<=filter_wavelength[-1]) & (w>=filter_wavelength[0])] + + #interpolate on transmission curve wavelengths + flux_inter = np.interp(filter_wavelength, w, flux) + + return flux_inter + +def calculate_band_flux(filter_curve, calspec_flux, filter_wavelength): + """ + calculate the average band flux of a calspec source in the filter band, see convention A in Gordon et al. (2022) + TBC if needed at all + + Args: + filter_curve (np.array): filter transmission curve over the filter_wavelength + calspec_flux (np.array): converted flux in units of erg/(s*cm^2*AA) of the calpec source in the filter band + filter_wavelength (np.array): wavelengths in units Angstroem in the filter band + + Returns: + float: average band flux of the calspec star in unit erg/(s*cm^2*AA) + """ + multi_flux = calspec_flux * filter_curve * filter_wavelength + multi_band = filter_curve * filter_wavelength + aver_flux = integrate.simpson(multi_flux, x=filter_wavelength)/integrate.simpson(multi_band, x=filter_wavelength) + + return aver_flux + +def calculate_effective_lambda(filter_curve, calspec_flux, filter_wavelength): + """ + calculate the effective wavelength of a calspec source in the filter band, see convention A in Gordon et al. 
(2022) + TBC if needed at all + + Args: + filter_curve (np.array): filter transmission curve over the filter_wavelength + calspec_flux (np.array): converted flux in units of the calpec source in the filter band + filter_wavelength (np.array): wavelengths in units nm in the filter band + + Returns: + float: effective wavelength in unit Angstroem + """ + multi_flux = calspec_flux * filter_curve * np.square(filter_wavelength) + multi_band = calspec_flux * filter_curve * filter_wavelength + eff_lambda = integrate.simpson(multi_flux, x=filter_wavelength)/integrate.simpson(multi_band, x=filter_wavelength) + + return eff_lambda + +def calculate_pivot_lambda(filter_curve, filter_wavelength): + """ + calculate the reference pivot wavelength of the filter band, see convention B in Gordon et al. (2022) + + Args: + filter_curve (np.array): filter transmission curve over the filter_wavelength + filter_wavelength (np.array): wavelengths in unit Angstroem in the filter band + + Returns: + float: pivot wavelength in unit Angstroem + """ + multi_flux = filter_curve * filter_wavelength + multi_band = filter_curve / filter_wavelength + piv_lambda = np.sqrt(integrate.simpson(multi_flux, x=filter_wavelength)/integrate.simpson(multi_band, x=filter_wavelength)) + + return piv_lambda + +def calculate_flux_ref(filter_wavelength, calspec_flux, wave_ref): + """ + calculate the flux at the reference wavelength of the filter band + + Args: + filter_wavelength (np.array): wavelengths in unit Angstroem in the filter band + calspec_flux (np.array): converted flux in units of the calpec source in the filter band + wave_ref (float): reference wavelength in unit Angstroem + + Returns: + float: flux at reference wavelength in unit erg/(s*cm^2*AA) + """ + + flux_ref = np.interp(wave_ref, filter_wavelength, calspec_flux) + return flux_ref + + +def compute_color_cor(filter_curve, filter_wavelength , flux_ref, wave_ref, flux_source): + """ + Compute the color correction factor K given the filter 
bandpass, reference spectrum (CALSPEC), + and source spectrum model. To use this color correction, divide the flux density + for a band by K. Such color corrections are needed to compute the correct + flux density at the reference wavelength for a source with the flux_source + spectral shape in the photometric convention that provides the flux density + at a reference wavelength (convention B, see Gordon et al. 2022, The Astronomical Journal 163:267, for details). + Thus the flux density value found by applying the calibration factor on the found detected electrons + of an arbitrary source should be divided by K (for the appropriate filter and spectral shape) + to produce the flux density at the reference wavelength of the filter. + The color correction adjusts the calibration factor to align the reference spectral shape + with the current source, which results in the correct flux density at the reference wavelength. + + Args: + filter_curve (np.array): transmission of the filter bandpass + filter_wavelength (np.array): the wavelengths of the filter bandpass, flux_ref, and flux_source in unit Angstroem + flux_ref (np.array): reference flux density F(lambda) as a function of wavelength + wave_ref (float): reference wavelength in unit Angstroem + flux_source (np.array): source flux density F(lambda) as a function of wavelength in CALSPEC unit erg/(s * cm^2 * AA) + + Returns: + float: color correction factor K + """ + # get the flux densities at the reference wavelength + flux_source_lambda_ref = calculate_flux_ref(filter_wavelength, flux_source, wave_ref) + flux_ref_lambda_ref = calculate_flux_ref(filter_wavelength, flux_ref, wave_ref) + + # compute the top and bottom integrals + int_source = integrate.simpson(filter_wavelength * filter_curve * flux_source / flux_source_lambda_ref, x=filter_wavelength) + int_ref = integrate.simpson(filter_wavelength * filter_curve * flux_ref / flux_ref_lambda_ref, x=filter_wavelength) + + return int_source / int_ref + +def 
calculate_band_irradiance(filter_curve, calspec_flux, filter_wavelength): + """ + calculate the integrated band flux, irradiance of a calspec source in the filter band + to determine the apparent magnitude + + Args: + filter_curve (np.array): filter transmission curve over the filter_wavelength + calspec_flux (np.array): converted flux in units of erg/(s*cm^2*AA) of the calpec source in the filter band + filter_wavelength (np.array): wavelengths in units Angstroem in the filter band + + Returns: + float: band irradiance of the calspec star in unit erg/(s*cm^2) + """ + multi_flux = calspec_flux * filter_curve + irrad = integrate.simpson(multi_flux, x=filter_wavelength) + + return irrad diff --git a/corgidrp/l2a_to_l2b.py b/corgidrp/l2a_to_l2b.py index 20910450..37942a5f 100644 --- a/corgidrp/l2a_to_l2b.py +++ b/corgidrp/l2a_to_l2b.py @@ -1,11 +1,10 @@ # A file that holds the functions that transmogrify l2a data to l2b data import numpy as np from scipy.interpolate import interp1d - +import copy import corgidrp.data as data from corgidrp.darks import build_synthesized_dark -from corgidrp.detector import detector_areas -from corgidrp.detector import ENF +from corgidrp.detector import detector_areas, ENF def add_photon_noise(input_dataset, kgain, detector_params): """ @@ -144,6 +143,12 @@ def flat_division(input_dataset, flat_field): #Divide by the master flat flatdiv_cube = flatdiv_dataset.all_data / flat_field.data + #Find where the flat_field is 0 and set a DQ flag: + where_zero = np.where(flat_field.data == 0) + flatdiv_dq = copy.deepcopy(flatdiv_dataset.all_dq) + for i in range(len(flatdiv_dataset)): + flatdiv_dq[i][where_zero] = np.bitwise_or(flatdiv_dataset[i].dq[where_zero], 4) + # propagate the error of the master flat frame if hasattr(flat_field, "err"): flatdiv_dataset.rescale_error(1/flat_field.data, "FlatField") @@ -154,7 +159,7 @@ def flat_division(input_dataset, flat_field): history_msg = "Flat calibration done using Flat field 
{0}".format(flat_field.filename) # update the output dataset with this new flat calibrated data and update the history - flatdiv_dataset.update_after_processing_step(history_msg,new_all_data=flatdiv_cube) + flatdiv_dataset.update_after_processing_step(history_msg,new_all_data=flatdiv_cube, new_all_dq = flatdiv_dq) return flatdiv_dataset diff --git a/corgidrp/l3_to_l4.py b/corgidrp/l3_to_l4.py index 7c73644a..28280073 100644 --- a/corgidrp/l3_to_l4.py +++ b/corgidrp/l3_to_l4.py @@ -1,5 +1,13 @@ # A file that holds the functions that transmogrify l3 data to l4 data +from pyklip.klip import rotate +from corgidrp import data +from scipy.ndimage import rotate as rotate_scipy # to avoid duplicated name +from scipy.ndimage import shift +import warnings +import numpy as np +import glob + def distortion_correction(input_dataset, distortion_calibration): """ @@ -29,6 +37,132 @@ def find_star(input_dataset): return input_dataset.copy() +def crop(input_dataset,sizexy=60,centerxy=None): + """ + + Crop the Images in a Dataset to a desired field of view. Default behavior is to + crop the image to the dark hole region, centered on the pixel intersection nearest + to the star location. Assumes 3D Image data is a stack of 2D data arrays, so only + crops the last two indices. Currently only configured for HLC mode. + + TODO: + - Pad with nans if you try to crop outside the array (handle err & DQ too) + - Option to crop to an odd data array and center on a pixel? + + Args: + input_dataset (corgidrp.data.Dataset): a dataset of Images (any level) + sizexy (int or array of int): desired frame size, if only one number is provided the + desired shape is assumed to be square, otherwise xy order. Defaults to 60. + centerxy (float or array of float): desired center (xy order), should be a pixel intersection (a.k.a + half-integer) otherwise the function rounds to the nearest intersection. Defaults to the + "STARLOCX/Y" header values. 
+ + Returns: + corgidrp.data.Dataset: a version of the input dataset cropped to the desired FOV. + """ + + # Copy input dataset + dataset = input_dataset.copy() + + # Require even data shape + if not np.all(np.array(sizexy)%2==0): + raise UserWarning('Even sizexy is required.') + + # Need to loop over frames and reinit dataset because array sizes change + frames_out = [] + + for frame in dataset: + prihdr = frame.pri_hdr + exthdr = frame.ext_hdr + dqhdr = frame.dq_hdr + errhdr = frame.err_hdr + + # Require that mode is HLC for now + if not prihdr['MODE'] == 'HLC': + raise UserWarning('Crop function is currently only configured for mode HLC.') + + # Assign new array sizes and center location + frame_shape = frame.data.shape + if isinstance(sizexy,int): + sizexy = [sizexy]*2 + if isinstance(centerxy,float): + centerxy = [centerxy] * 2 + elif centerxy is None: + if ("STARLOCX" in exthdr.keys()) and ("STARLOCY" in exthdr.keys()): + centerxy = np.array([exthdr["STARLOCX"],exthdr["STARLOCY"]]) + else: raise ValueError('centerxy not provided but STARLOCX/Y are missing from image extension header.') + + # Round to centerxy to nearest half-pixel + centerxy = np.array(centerxy) + if not np.all((centerxy-0.5)%1 == 0): + old_centerxy = centerxy.copy() + centerxy = np.round(old_centerxy-0.5)+0.5 + warnings.warn(f'Desired center {old_centerxy} is not at the intersection of 4 pixels. 
Centering on the nearest intersection {centerxy}') + + # Crop the data + start_ind = (centerxy + 0.5 - np.array(sizexy)/2).astype(int) + end_ind = (centerxy + 0.5 + np.array(sizexy)/2).astype(int) + x1,y1 = start_ind + x2,y2 = end_ind + + # Check if cropping outside the FOV + xleft_pad = -x1 if (x1<0) else 0 + xrright_pad = x2-frame_shape[-1]+1 if (x2 > frame_shape[-1]) else 0 + ybelow_pad = -y1 if (y1<0) else 0 + yabove_pad = y2-frame_shape[-2]+1 if (y2 > frame_shape[-2]) else 0 + + if np.any(np.array([xleft_pad,xrright_pad,ybelow_pad,yabove_pad])> 0) : + raise ValueError("Trying to crop to a region outside the input data array. Not yet configured.") + + if frame.data.ndim == 2: + cropped_frame_data = frame.data[y1:y2,x1:x2] + cropped_frame_err = frame.err[:,y1:y2,x1:x2] + cropped_frame_dq = frame.dq[y1:y2,x1:x2] + elif frame.data.ndim == 3: + cropped_frame_data = frame.data[:,y1:y2,x1:x2] + cropped_frame_err = frame.err[:,:,y1:y2,x1:x2] + cropped_frame_dq = frame.dq[:,y1:y2,x1:x2] + else: + raise ValueError('Crop function only supports 2D or 3D frame data.') + + # Update headers + exthdr["NAXIS1"] = sizexy[0] + exthdr["NAXIS2"] = sizexy[1] + dqhdr["NAXIS1"] = sizexy[0] + dqhdr["NAXIS2"] = sizexy[1] + errhdr["NAXIS1"] = sizexy[0] + errhdr["NAXIS2"] = sizexy[1] + errhdr["NAXIS3"] = cropped_frame_err.shape[-3] + if frame.data.ndim == 3: + exthdr["NAXIS3"] = frame.data.shape[0] + dqhdr["NAXIS3"] = frame.dq.shape[0] + errhdr["NAXIS4"] = frame.err.shape[0] + + updated_hdrs = [] + if ("STARLOCX" in exthdr.keys()): + exthdr["STARLOCX"] -= x1 + exthdr["STARLOCY"] -= y1 + updated_hdrs.append('STARLOCX/Y') + if ("MASKLOCX" in exthdr.keys()): + exthdr["MASKLOCX"] -= x1 + exthdr["MASKLOCY"] -= y1 + updated_hdrs.append('MASKLOCX/Y') + if ("CRPIX1" in prihdr.keys()): + prihdr["CRPIX1"] -= x1 + prihdr["CRPIX2"] -= y1 + updated_hdrs.append('CRPIX1/2') + new_frame = data.Image(cropped_frame_data,prihdr,exthdr,cropped_frame_err,cropped_frame_dq,frame.err_hdr,frame.dq_hdr) + 
frames_out.append(new_frame) + + output_dataset = data.Dataset(frames_out) + + history_msg = f"""Frames cropped to new shape {output_dataset[0].data.shape} on center {centerxy}.\ + Updated header kws: {", ".join(updated_hdrs)}.""" + + output_dataset.update_after_processing_step(history_msg) + + return output_dataset + def do_psf_subtraction(input_dataset, reference_star_dataset=None): """ @@ -44,6 +178,90 @@ def do_psf_subtraction(input_dataset, reference_star_dataset=None): return input_dataset.copy() +def northup(input_dataset,correct_wcs=False): + """ + Derotate the Image, ERR, and DQ data by the angle offset to make the FoV up to North. + Now tentatively assuming 'ROLL' in the primary header incorporates all the angle offset, and the center of the FoV is the star position. + WCS correction is not yet implemented - TBD. + + Args: + input_dataset (corgidrp.data.Dataset): a dataset of Images (L3-level) + correct_wcs: if you want to correct WCS solutions after rotation, set True. Now hardcoded with not using astr_hdr. 
+ + Returns: + corgidrp.data.Dataset: North is up, East is left + + """ + + # make a copy + processed_dataset = input_dataset.copy() + + new_all_data = []; new_all_err = []; new_all_dq = [] + for processed_data in processed_dataset: + # read the roll angle parameter, assuming this info is recorded in the primary header as requested + roll_angle = processed_data.pri_hdr['ROLL'] + + ## image extension ## + im_hd = processed_data.ext_hdr + im_data = processed_data.data + ylen, xlen = im_data.shape + + # define the center for rotation + try: + xcen, ycen = im_hd['PSFCENTX'], im_hd['PSFCENTY'] # TBU, after concluding the header keyword + except KeyError: + xcen, ycen = xlen/2, ylen/2 + + # look for WCS solutions + if correct_wcs is False: + astr_hdr = None + else: + astr_hdr = None # hardcoded now, no WCS information in the header + + # derotate + im_derot = rotate(im_data,-roll_angle,(xcen,ycen),astr_hdr=astr_hdr) + new_all_data.append(im_derot) + ############## + + ## HDU ERR ## + err_data = processed_data.err + err_derot = np.expand_dims(rotate(err_data[0],-roll_angle,(xcen,ycen)), axis=0) # err data shape is 1x1024x1024 + new_all_err.append(err_derot) + ############# + + ## HDU DQ ## + # all DQ pixels must have integers, use scipy.ndimage.rotate with order=0 instead of klip.rotate (rotating the other way) + dq_data = processed_data.dq + if xcen != xlen/2 or ycen != ylen/2: + # padding, shifting (rot center to image center), rotating, re-shift (image center to rot center), and cropping + # calculate shift values + xshift = xcen-xlen/2; yshift = ycen-ylen/2 + + # pad and shift + pad_x = int(np.ceil(abs(xshift))); pad_y = int(np.ceil(abs(yshift))) + dq_data_padded = np.pad(dq_data,pad_width=((pad_y, pad_y), (pad_x, pad_x)),mode='constant',constant_values=np.nan) + dq_data_padded_shifted = shift(dq_data_padded,(-yshift,-xshift),order=0,mode='constant',cval=np.nan) + + # define slices for cropping + crop_x = slice(pad_x,pad_x+xlen); crop_y = slice(pad_y,pad_y+ylen) + + # 
rotate, re-shift, and crop + dq_derot = shift(rotate_scipy(dq_data_padded_shifted, roll_angle, order=0, mode='constant', reshape=False, cval=np.nan),\ + (yshift,xshift),order=0,mode='constant',cval=np.nan)[crop_y,crop_x] + else: + # simply rotate + dq_derot = rotate_scipy(dq_data, roll_angle, order=0, mode='constant', reshape=False, cval=np.nan) + + new_all_dq.append(dq_derot) + ############ + + hisotry_msg = f'FoV rotated by {-roll_angle}deg counterclockwise at a roll center {xcen, ycen}' + + processed_dataset.update_after_processing_step(hisotry_msg, new_all_data=np.array(new_all_data), new_all_err=np.array(new_all_err),\ + new_all_dq=np.array(new_all_dq)) + + return processed_dataset + def update_to_l4(input_dataset): """ Updates the data level to L4. Only works on L3 data. @@ -75,4 +293,4 @@ def update_to_l4(input_dataset): history_msg = "Updated Data Level to L4" updated_dataset.update_after_processing_step(history_msg) - return updated_dataset \ No newline at end of file + return updated_dataset diff --git a/corgidrp/l4_to_tda.py b/corgidrp/l4_to_tda.py new file mode 100644 index 00000000..5b55067f --- /dev/null +++ b/corgidrp/l4_to_tda.py @@ -0,0 +1,133 @@ +# A file that holds the functions that transmogrify l4 data to TDA (Technical Demo Analysis) data +import corgidrp.fluxcal as fluxcal +import numpy as np + +def determine_app_mag(input_dataset, source_star, scale_factor = 1.): + """ + determine the apparent Vega magnitude of the observed source + in the used filter band and put it into the header. + We assume that each frame in the dataset was observed with the same color filter. 
+ + Args: + input_dataset (corgidrp.data.Dataset): a dataset of Images (L2b-level) + source_star (str): either the fits file path of the flux model of the observed source in + CALSPEC units (erg/(s * cm^2 * AA) and format or the (SIMBAD) name of a CALSPEC star + scale_factor (float): factor applied to the flux of the calspec standard source, so that you can apply it + if you have a different source with similar spectral type, but no calspec standard. + Defaults to 1. + + Returns: + corgidrp.data.Dataset: a version of the input dataset with updated header including + the apparent magnitude + """ + mag_dataset = input_dataset.copy() + # get the filter name from the header keyword 'CFAMNAME' + filter_name = fluxcal.get_filter_name(mag_dataset) + # read the transmission curve from the color filter file + wave, filter_trans = fluxcal.read_filter_curve(filter_name) + + if source_star.split(".")[-1] == "fits": + source_filepath = source_star + else: + source_filepath = fluxcal.get_calspec_file(source_star) + + vega_filepath = fluxcal.get_calspec_file('Vega') + + # calculate the flux of VEGA and the source star from the user given CALSPEC file binned on the wavelength grid of the filter + vega_sed = fluxcal.read_cal_spec(vega_filepath, wave) + source_sed = fluxcal.read_cal_spec(source_filepath, wave) * scale_factor + #Calculate the irradiance of vega and the source star in the filter band + vega_irr = fluxcal.calculate_band_irradiance(filter_trans, vega_sed, wave) + source_irr = fluxcal.calculate_band_irradiance(filter_trans, source_sed, wave) + #calculate apparent magnitude + app_mag = -2.5 * np.log10(source_irr/vega_irr) + # write the apparent magnitude to the header (keyword name tbd) + history_msg = "the apparent Vega magnitude is calculated and added to the header {0}".format(str(app_mag)) + # update the header of the output dataset and update the history + mag_dataset.update_after_processing_step(history_msg, header_entries =
{"APP_MAG": app_mag}) + + return mag_dataset + + +def determine_color_cor(input_dataset, ref_star, source_star): + """ + determine the color correction factor of the observed source + at the reference wavelength of the used filter band and put it into the header. + We assume that each frame in the dataset was observed with the same color filter. + + Args: + input_dataset (corgidrp.data.Dataset): a dataset of Images (L2b-level) + ref_star (str): either the fits file path of the known reference flux (usually CALSPEC), + or the (SIMBAD) name of a CALSPEC star + source_star (str): either the fits file path of the flux model of the observed source in + CALSPEC units (erg/(s * cm^2 * AA) and format or the (SIMBAD) name of a CALSPEC star + + Returns: + corgidrp.data.Dataset: a version of the input dataset with updated header including + the reference wavelength and the color correction factor + """ + color_dataset = input_dataset.copy() + # get the filter name from the header keyword 'CFAMNAME' + filter_name = fluxcal.get_filter_name(color_dataset) + # read the transmission curve from the color filter file + wave, filter_trans = fluxcal.read_filter_curve(filter_name) + # calculate the reference wavelength of the color filter + lambda_ref = fluxcal.calculate_pivot_lambda(filter_trans, wave) + + # ref_star/source_star is either the star name or the file path to fits file + if ref_star.split(".")[-1] == "fits": + calspec_filepath = ref_star + else: + calspec_filepath = fluxcal.get_calspec_file(ref_star) + if source_star.split(".")[-1] == "fits": + source_filepath = source_star + else: + source_filepath = fluxcal.get_calspec_file(source_star) + + # calculate the flux from the user given CALSPEC file binned on the wavelength grid of the filter + flux_ref = fluxcal.read_cal_spec(calspec_filepath, wave) + # we assume that the source spectrum is a calspec standard or its + # model data is in a file with the same format and unit as the calspec data + source_sed = 
fluxcal.read_cal_spec(source_filepath, wave) + #Calculate the color correction factor + k = fluxcal.compute_color_cor(filter_trans, wave, flux_ref, lambda_ref, source_sed) + + # write the reference wavelength and the color correction factor to the header (keyword names tbd) + history_msg = "the color correction is calculated and added to the header {0}".format(str(k)) + # update the header of the output dataset and update the history + color_dataset.update_after_processing_step(history_msg, header_entries = {"LAM_REF": lambda_ref, "COL_COR": k}) + + return color_dataset + +def update_to_tda(input_dataset): + """ + Updates the data level to TDA (Technical Demo Analysis). Only works on L4 data. + + Currently only checks that data is at the L4 level + + Args: + input_dataset (corgidrp.data.Dataset): a dataset of Images (L4-level) + + Returns: + corgidrp.data.Dataset: same dataset now at TDA-level + """ + # check that we are running this on L1 data + for orig_frame in input_dataset: + if orig_frame.ext_hdr['DATA_LEVEL'] != "L4": + err_msg = "{0} needs to be L4 data, but it is {1} data instead".format(orig_frame.filename, orig_frame.ext_hdr['DATA_LEVEL']) + raise ValueError(err_msg) + + # we aren't altering the data + updated_dataset = input_dataset.copy(copy_data=False) + + for frame in updated_dataset: + # update header + frame.ext_hdr['DATA_LEVEL'] = "TDA" + # update filename convention. 
The file convention should be + # "CGI_[datalevel_*]" so we should be same just replacing the just instance of L1 + frame.filename = frame.filename.replace("_L4_", "_TDA_", 1) + + history_msg = "Updated Data Level to TDA" + updated_dataset.update_after_processing_step(history_msg) + + return updated_dataset \ No newline at end of file diff --git a/corgidrp/mocks.py b/corgidrp/mocks.py index 2f317d66..cf278aae 100644 --- a/corgidrp/mocks.py +++ b/corgidrp/mocks.py @@ -617,12 +617,13 @@ def create_prescan_files(filedir=None, numfiles=2, arrtype="SCI"): return dataset -def create_default_headers(arrtype="SCI"): +def create_default_headers(arrtype="SCI", vistype="TDEMO"): """ Creates an empty primary header and an Image extension header with some possible keywords Args: - arrtype (str): Observation type. Defaults to "SCI". + arrtype (str): Array type (SCI or ENG). Defaults to "SCI". + vistype (str): Visit type. Defaults to "TDEMO" Returns: tuple: @@ -643,7 +644,8 @@ def create_default_headers(arrtype="SCI"): # fill in prihdr prihdr['OBSID'] = 0 prihdr['BUILD'] = 0 - prihdr['OBSTYPE'] = arrtype + # prihdr['OBSTYPE'] = arrtype + prihdr['VISTYPE'] = vistype prihdr['MOCK'] = True # fill in exthdr @@ -654,7 +656,7 @@ def create_default_headers(arrtype="SCI"): exthdr['GCOUNT'] = 1 exthdr['BSCALE'] = 1 exthdr['BZERO'] = 32768 - exthdr['ARRTYPE'] = arrtype # seems to be the same as OBSTYPE + exthdr['ARRTYPE'] = arrtype exthdr['SCTSRT'] = '2024-01-01T12:00:00.000Z' exthdr['SCTEND'] = '2024-01-01T20:00:00.000Z' exthdr['STATUS'] = 0 @@ -988,7 +990,7 @@ def create_astrom_data(field_path, filedir=None, subfield_radius=0.02, platescal # load as an image object frames = [] prihdr, exthdr = create_default_headers() - prihdr['OBSTYPE'] = 'ASTROM' + prihdr['VISTYPE'] = 'BORESITE' prihdr['RA'] = target[0] prihdr['DEC'] = target[1] diff --git a/corgidrp/recipe_templates/l1_to_kgain.json b/corgidrp/recipe_templates/l1_to_kgain.json index 9c3ebdda..cad6e2ab 100644 --- 
a/corgidrp/recipe_templates/l1_to_kgain.json +++ b/corgidrp/recipe_templates/l1_to_kgain.json @@ -1,3 +1,4 @@ + { "name" : "l1_to_kgain", "template" : true, @@ -7,6 +8,11 @@ "inputs" : [], "outputdir" : "", "steps" : [ + { "name" : "sort_pupilimg_frames", + "keywords" : { + "cal_type" : "k-gain" + } + }, { "name" : "prescan_biassub", "keywords" : { diff --git a/corgidrp/recipe_templates/l1_to_l2a_nonlin.json b/corgidrp/recipe_templates/l1_to_l2a_nonlin.json index fcc94573..8b476ead 100644 --- a/corgidrp/recipe_templates/l1_to_l2a_nonlin.json +++ b/corgidrp/recipe_templates/l1_to_l2a_nonlin.json @@ -7,6 +7,11 @@ "inputs" : [], "outputdir" : "", "steps" : [ + { "name" : "sort_pupilimg_frames", + "keywords" : { + "cal_type" : "non-lin" + } + }, { "name" : "prescan_biassub", "keywords" : { @@ -36,7 +41,6 @@ "calibs" : { "DetectorParams" : "AUTOMATIC", "KGain" : "AUTOMATIC, OPTIONAL" - } }, { diff --git a/corgidrp/recipe_templates/l1_to_l2a_tdd_vap.json b/corgidrp/recipe_templates/l1_to_l2a_tdd_vap.json new file mode 100644 index 00000000..8e302a26 --- /dev/null +++ b/corgidrp/recipe_templates/l1_to_l2a_tdd_vap.json @@ -0,0 +1,72 @@ +{ + "name" : "l1_to_l2a_basic", + "template" : true, + "drpconfig" : { + "track_individual_errors" : false + }, + "inputs" : [], + "outputdir" : "", + "steps" : [ + { + "name" : "prescan_biassub", + "calibs" : { + "DetectorNoiseMaps" : "AUTOMATIC,OPTIONAL" + }, + "keywords" : { + "return_full_frame" : false, + "detector_regions" : { + "SCI" : { + "frame_rows" : 1200, + "frame_cols" : 2200, + "image" : { + "rows": 1024, + "cols": 1024, + "r0c0": [13, 1088] + }, + "prescan" : { + "rows": 1200, + "cols": 1088, + "r0c0": [0, 0], + "col_start": 800, + "col_end": 1000 + }, + "prescan_reliable" : { + "rows": 1200, + "cols": 1088, + "r0c0": [0, 0] + }, + "parallel_overscan" : { + "rows": 163, + "cols": 1056, + "r0c0": [1037, 1088] + }, + "serial_overscan" : { + "rows": 1200, + "cols": 56, + "r0c0": [0, 2144] + } + } + } + } + }, + { + "name" : 
"detect_cosmic_rays", + "calibs" : { + "DetectorParams" : "AUTOMATIC", + "KGain" : "AUTOMATIC, OPTIONAL" + } + }, + { + "name" : "correct_nonlinearity", + "calibs" : { + "NonLinearityCalibration" : "AUTOMATIC" + } + }, + { + "name" : "update_to_l2a" + }, + { + "name" : "save" + } + ] +} diff --git a/corgidrp/recipe_templates/l1_to_l2a_tdd_vap_full.json b/corgidrp/recipe_templates/l1_to_l2a_tdd_vap_full.json new file mode 100644 index 00000000..50151aff --- /dev/null +++ b/corgidrp/recipe_templates/l1_to_l2a_tdd_vap_full.json @@ -0,0 +1,72 @@ +{ + "name" : "l1_to_l2a_basic", + "template" : true, + "drpconfig" : { + "track_individual_errors" : false + }, + "inputs" : [], + "outputdir" : "", + "steps" : [ + { + "name" : "prescan_biassub", + "calibs" : { + "DetectorNoiseMaps" : "AUTOMATIC,OPTIONAL" + }, + "keywords" : { + "return_full_frame" : true, + "detector_regions" : { + "SCI" : { + "frame_rows" : 1200, + "frame_cols" : 2200, + "image" : { + "rows": 1024, + "cols": 1024, + "r0c0": [13, 1088] + }, + "prescan" : { + "rows": 1200, + "cols": 1088, + "r0c0": [0, 0], + "col_start": 800, + "col_end": 1000 + }, + "prescan_reliable" : { + "rows": 1200, + "cols": 1088, + "r0c0": [0, 0] + }, + "parallel_overscan" : { + "rows": 163, + "cols": 1056, + "r0c0": [1037, 1088] + }, + "serial_overscan" : { + "rows": 1200, + "cols": 56, + "r0c0": [0, 2144] + } + } + } + } + }, + { + "name" : "detect_cosmic_rays", + "calibs" : { + "DetectorParams" : "AUTOMATIC", + "KGain" : "AUTOMATIC, OPTIONAL" + } + }, + { + "name" : "correct_nonlinearity", + "calibs" : { + "NonLinearityCalibration" : "AUTOMATIC" + } + }, + { + "name" : "update_to_l2a" + }, + { + "name" : "save" + } + ] +} diff --git a/corgidrp/recipe_templates/l2a_to_l2b_tdd_vap.json b/corgidrp/recipe_templates/l2a_to_l2b_tdd_vap.json new file mode 100644 index 00000000..7664e45f --- /dev/null +++ b/corgidrp/recipe_templates/l2a_to_l2b_tdd_vap.json @@ -0,0 +1,65 @@ +{ + "name" : "l2a_to_l2b", + "template" : true, + "drpconfig" : { 
+ "track_individual_errors" : false + }, + "inputs" : [], + "outputdir" : "/Users/ygouf/git/cgi-sit/13-tdd/vi_tdd_04_iit_data_processing/output_drp_L2b/", + "steps" : [ + { + "name" : "frame_select" + }, + { + "name" : "convert_to_electrons", + "calibs" : { + "KGain" : "AUTOMATIC" + } + }, + { + "name" : "em_gain_division" + }, + { + "name" : "add_photon_noise" + }, + { + "name" : "dark_subtraction", + "calibs" : { + "DetectorNoiseMaps" : "AUTOMATIC" + }, + "keywords" : { + "outputdir" : "AUTOMATIC" + } + }, + { + "name" : "desmear", + "calibs" : { + "DetectorParams" : "AUTOMATIC" + } + }, + { + "name" : "cti_correction", + "calibs" : { + "TrapCalibration" : "AUTOMATIC,OPTIONAL" + } + }, + { + "name" : "flat_division", + "calibs" : { + "FlatField" : "AUTOMATIC" + } + }, + { + "name" : "correct_bad_pixels", + "calibs" : { + "BadPixelMap" : "AUTOMATIC" + } + }, + { + "name" : "update_to_l2b" + }, + { + "name" : "save" + } + ] +} diff --git a/corgidrp/recipe_templates/test.json b/corgidrp/recipe_templates/test.json new file mode 100644 index 00000000..bb1e64da --- /dev/null +++ b/corgidrp/recipe_templates/test.json @@ -0,0 +1,53 @@ +{ + "name" : "l1_to_l2a_nonlin", + "template" : true, + "drpconfig" : { + "track_individual_errors" : false + }, + "inputs" : [], + "outputdir" : "", + "steps" : [ + { + "name" : "prescan_biassub", + "keywords" : { + "return_full_frame" : true, + "detector_regions" : { + "SCI" : { + "frame_rows" : 1200, + "frame_cols" : 2200, + "prescan" : { + "rows": 1200, + "cols": 1088, + "r0c0": [0, 0], + "col_start": 799, + "col_end": 1000 + }, + "prescan_reliable" : { + "rows": 1200, + "cols": 201, + "r0c0": [0, 799] + } + } + } + } + }, + { + "name" : "detect_cosmic_rays", + "calibs" : { + "DetectorParams" : "AUTOMATIC", + "KGain" : "AUTOMATIC, OPTIONAL" + } + }, + { "name" : "sort_pupilimg_frames", + "keywords" : { + "cal_type" : "non-lin" + } + }, + { + "name" : "calibrate_nonlin" + }, + { + "name" : "save" + } + ] +} diff --git 
a/corgidrp/sorting.py b/corgidrp/sorting.py new file mode 100644 index 00000000..b34ec77c --- /dev/null +++ b/corgidrp/sorting.py @@ -0,0 +1,262 @@ +import copy +import numpy as np + +import corgidrp.data as data + +def extract_frame_id(filename): + """ + Extract frame ID from an L1 filename. Structure is assumed to be ending + like '..._frame_id.fits' where frame_id is a series of digits + + Args: + filename: L1 filename + + Returns: + Frame id as a string of length 10 + """ + idx_0 = len(filename) - filename[::-1].find('_') + idx_1 = len(filename) - filename[::-1].find('.') - 1 + + return int(filename[idx_0:idx_1]) + +def sort_pupilimg_frames( + dataset_in, + cal_type=''): + """ + Sorting algorithm that given a dataset will output a dataset with the + frames used to generate a mean frame and the frames used to calibrate + the calibration type: k-gain. non-linearity. + + The output dataset has an added keyword value in its extended header: + OBSTYPE with values 'MNFRAME' (for mean frame), 'KGAIN' (for K-gain), + and 'NONLIN' (for non-linearity). + + Args: + dataset_in (corgidrp.Dataset): dataset with all the frames to be sorted. + By default, it is expected to contain all the frames from the PUPILIMG + visit files associated with a calibration campaign + cal_type (string): the calibration type. Case insensitive. + Accepted values are: + 'k-gain' or 'kgain' for K-gain calibration + 'non-lin(earity)', 'nonlin(earity)' for non-linearity calibration, where + the letters within parenthesis are optional. + + Returns: + + Dataset with the frames used to generate a mean frame and the frames + used to calibrate the calibration type: k-gain or non-linearity. 
+ """ + # Copy dataset + dataset_cp = dataset_in.copy() + # Split by CMDGAIN + split_cmdgain = dataset_cp.split_dataset(exthdr_keywords=['CMDGAIN']) + # Mean frame: split by EXPTIME + idx_unity = np.where(np.array(split_cmdgain[1])==1)[0][0] + split_exptime = split_cmdgain[0][idx_unity].split_dataset(exthdr_keywords=['EXPTIME']) + # Get the set with the most frames + n_frames_list = np.zeros(len(split_exptime[0])) + for i_sub, subset in enumerate(split_exptime[0]): + n_frames_list[i_sub] = len(split_exptime[0][i_sub]) + # Choice: choose the subset with the maximum number of frames + idx_mean_frame = np.argmax(n_frames_list) + frame_id_list = [] + for frame in split_exptime[0][idx_mean_frame]: + frame_id_list += [extract_frame_id(frame.filename)] + # Choose the frames with consecutive ID numbers (same row in AUX file) + frame_id_sort = np.array(frame_id_list) + frame_id_sort.sort() + count_cons = [1] + idx_cons = 0 + for idx in range(len(frame_id_sort)-1): + if frame_id_sort[idx+1] - frame_id_sort[idx] == 1: + count_cons[idx_cons] += 1 + else: + idx_cons += 1 + count_cons += [1] + # Choose the largest subset + idx_mean_frame_cons = np.argmax(count_cons) + idx_mean_frame_last = np.sum(count_cons[0:idx_mean_frame_cons+1]).astype(int) + idx_mean_frame_first = idx_mean_frame_last - count_cons[idx_mean_frame_cons] + frame_id_mean_frame = frame_id_sort[idx_mean_frame_first:idx_mean_frame_last] + mean_frame_list = [] + + n_mean_frame = 0 + for frame in split_exptime[0][idx_mean_frame]: + if int(extract_frame_id(frame.filename)) in frame_id_mean_frame: + exptime_mean_frame = frame.ext_hdr['EXPTIME'] + # Update keyword OBSTYPE + frame.pri_hdr['OBSTYPE'] = 'MNFRAME' + mean_frame_list += [frame] + n_mean_frame += 1 + + sorting_summary = (f'Mean frame has {n_mean_frame} unity frames with' + + f' exposure time {exptime_mean_frame} seconds. 
') + + # K-gain and non-linearity + cal_frame_list = [] + if cal_type.lower() == 'k-gain' or cal_type.lower() == 'kgain': + print('Considering K-gain:') + elif cal_type.lower()[0:7] == 'non-lin' or cal_type.lower()[0:6] == 'nonlin': + print('Considering Non-linearity:') + else: + raise Exception('Unrecognized calibration type (expected k-gain, non-lin)') + + # Remove main frame frames from unity gain frames + split_exptime[0].remove(split_exptime[0][idx_mean_frame]) + split_exptime[1].remove(split_exptime[1][idx_mean_frame]) + # Frames must be taken consecutively + frame_id_list = [] + exptime_list = [] + unity_gain_filepath_list = [] + for subset in split_exptime[0]: + for frame in subset: + frame_id_list += [extract_frame_id(frame.filename)] + exptime_list += [frame.ext_hdr['EXPTIME']] + unity_gain_filepath_list += [frame.filepath] + idx_id_sort = np.argsort(frame_id_list) + exptime_arr = np.array(exptime_list)[idx_id_sort] + # Count repeated, consecutive elements + count_cons = [1] + exptime_cons = [exptime_arr[0]] + idx_cons = 0 + for exptime in exptime_arr: + if exptime == exptime_cons[-1]: + count_cons[idx_cons] += 1 + else: + idx_cons += 1 + count_cons += [1] + exptime_cons += [exptime] + # First index always has a repetition in the previous loop (id=id) + count_cons[0] -= 1 + + idx_cons2 = [0] + exptime_cons2 = [exptime_cons[idx_cons2[0]]] + kgain_subset = [] + # Iterate over unique counts that are consecutive + for idx_count in range(len(count_cons) - 1): + # Indices to cover two consecutive sets + idx_id_first = np.sum(count_cons[0:idx_count]).astype(int) + idx_id_last = np.sum(count_cons[0:idx_count+2]).astype(int) + diff_id = np.diff(np.array(frame_id_list)[idx_id_sort[idx_id_first:idx_id_last]]) + diff_exp = np.diff(exptime_cons) + # Both subsets must have all Ids consecutive because they are in + # time order + if (count_cons[idx_count+1] == count_cons[idx_count] and + np.all(diff_id == 1) and diff_exp[idx_count] > 0): + exptime_cons2 += 
[exptime_cons[idx_count+1]] + idx_cons2 += [idx_count+1] + # Last exposure time must be repeated and only once + elif (diff_exp[idx_count] < 0 and + exptime_cons[idx_count+1] in exptime_cons[0:idx_count+1] and + len(exptime_cons2) == len(set(exptime_cons2))): + kgain_subset += [idx_cons2[0], idx_count+1] + idx_cons2 = [idx_count+1] + exptime_cons2 = [exptime_cons] + else: + # It is not a subset for kgain + continue + # Choose the largest subset + kgain_subset = np.array(kgain_subset) + idx_kgain = np.argmax(kgain_subset[1::2] - kgain_subset[0::2]) + # Extract first/last index in the subset of consecutive frames + idx_kgain_0 = kgain_subset[2*idx_kgain] + idx_kgain_1 = kgain_subset[2*idx_kgain + 1] + # Count frames before and subset length + idx_kgain_first = np.sum(count_cons[0:idx_kgain_0]).astype(int) + idx_kgain_last = (idx_kgain_first + + np.sum(count_cons[idx_kgain_0:idx_kgain_1+1]).astype(int)) + + # Sort unity gain filenames + unity_gain_filepath_arr = np.array(unity_gain_filepath_list)[idx_id_sort] + cal_list = unity_gain_filepath_arr[idx_kgain_first:idx_kgain_last] + # Update OBSTYPE and take profit to check files are in the list + n_kgain = 0 + cal_frame_list = [] + for frame in dataset_cp: + if frame.filepath in cal_list: + vistype = frame.pri_hdr['VISTYPE'] + frame.pri_hdr['OBSTYPE'] = 'KGAIN' + cal_frame_list += [frame] + n_kgain += 1 + + sorting_summary += (f'K-gain has {n_kgain} unity frames with exposure ' + + f'times {exptime_cons[idx_kgain_0:idx_kgain_1+1]} seconds with ' + + f'{count_cons[idx_kgain_0]} frames each. 
+ # Non-unity gain frames for Non-linearity + if cal_type.lower()[0:7] == 'non-lin' or cal_type.lower()[0:6] == 'nonlin': + # Non-unity gain frames + split_cmdgain[0].remove(split_cmdgain[0][idx_unity]) + split_cmdgain[1].remove(split_cmdgain[1][idx_unity]) + n_nonlin = 0 + nonlin_emgain = [] + for idx_gain_set, gain_set in enumerate(split_cmdgain[0]): + # Frames must be taken consecutively + frame_id_list = [] + exptime_list = [] + gain_filepath_list = [] + for frame in gain_set: + frame_id_list += [extract_frame_id(frame.filename)] + exptime_list += [frame.ext_hdr['EXPTIME']] + gain_filepath_list += [frame.filepath] + # One can set a stronger condition, though in the end the max set + if len(frame_id_list) < 3: + continue + idx_id_sort = np.argsort(frame_id_list) + exptime_arr = np.array(exptime_list)[idx_id_sort] + # We need an increasing series of exposure times with the last one + # the only repeated value in the series + idx_subsets = np.where(np.diff(exptime_arr) < 0)[0] + if len(idx_subsets) == 0: + continue + # length of candidate subsets + nonlin_len = [] + for idx, idx_subset in enumerate(idx_subsets): + # Add 0th element plus the one lost in diff + if idx == 0: + exptime_tmp = exptime_arr[0:idx_subset+2] + else: + exptime_tmp = exptime_arr[idx_subsets[idx-1]+1:idx_subset+2] + # Check conditions + if (exptime_tmp[-1] in exptime_tmp[:-1] and + len(exptime_tmp) - 1 == len(set(exptime_tmp))): + nonlin_len += [len(exptime_tmp)] + else: + nonlin_len += [-1] + # Continue if there are no good candidates + if np.max(nonlin_len) <= 0: + continue + # Find maximum set among good candidates + if np.argmax(nonlin_len) == 0: + idx_nonlin_first = 0 + idx_nonlin_last = idx_subsets[0] + 1 + else: + idx_nonlin_first = idx_subsets[np.argmax(nonlin_len)-1] + 1 + idx_nonlin_last = idx_subsets[np.argmax(nonlin_len)] + 1 + # Sort unity gain filenames + gain_filepath_arr = np.array(gain_filepath_list)[idx_id_sort] + cal_list = 
gain_filepath_arr[idx_nonlin_first:idx_nonlin_last+1] + # Update OBSTYPE and take profit to check files are in the list + for frame in dataset_cp: + if frame.filepath in cal_list: + vistype = frame.pri_hdr['VISTYPE'] + frame.pri_hdr['OBSTYPE'] = 'NONLIN' + cal_frame_list += [frame] + n_nonlin += 1 + nonlin_emgain += [split_cmdgain[1][idx_gain_set]] + + sorting_summary += (f'Non-linearity has {n_nonlin} frames with gains ' + + f'{nonlin_emgain}') + + # TODO: Add a HISTORY entry + history = (f'Dataset to calibrate {cal_type.upper()}. A sorting algorithm ' + + 'based on the constraints that NFRAMES, EXPTIME and CMDGAIN have when collecting ' + + 'calibration data for K-gain, Non-linearity and EM-gain vs DAC ' + f"was applied to an input dataset from {vistype} visit files." + + f'The result is that {sorting_summary}.') + print(history) + + dataset_sorted = data.Dataset(mean_frame_list + cal_frame_list) + dataset_sorted.update_after_processing_step(history) + # Return Datafrane with mean frame and cal type + return dataset_sorted diff --git a/corgidrp/walker.py b/corgidrp/walker.py index 0264b860..25f9b441 100644 --- a/corgidrp/walker.py +++ b/corgidrp/walker.py @@ -15,6 +15,7 @@ import corgidrp.calibrate_nonlin import corgidrp.detector import corgidrp.darks +import corgidrp.sorting all_steps = { "prescan_biassub" : corgidrp.l1_to_l2a.prescan_biassub, @@ -39,7 +40,8 @@ "calibrate_darks" : corgidrp.darks.calibrate_darks_lsq, "create_onsky_flatfield" : corgidrp.detector.create_onsky_flatfield, "combine_subexposures" : corgidrp.combine.combine_subexposures, - "build_trad_dark" : corgidrp.darks.build_trad_dark + "build_trad_dark" : corgidrp.darks.build_trad_dark, + "sort_pupilimg_frames" : corgidrp.sorting.sort_pupilimg_frames } recipe_dir = os.path.join(os.path.dirname(__file__), "recipe_templates") @@ -53,28 +55,42 @@ def walk_corgidrp(filelist, CPGS_XML_filepath, outputdir, template=None): filelist (list of str): list of filepaths to files CPGS_XML_filepath (str): path 
to CPGS XML file for this set of files in filelist outputdir (str): output directory folderpath - template (str or json): custom template. either the full json file, or a filename of - a template that's already in the recipe_templates folder + template (str or json): custom template. It can be one of three things + * the full json object, + * a filename of a template that's already in the recipe_templates folder + * a filepath to a template on disk somewhere + Returns: - json: the JSON recipe that was used for processing + json or list: the JSON recipe (or list of JSON recipes) that was used for processing """ if isinstance(template, str): - recipe_filepath = os.path.join(recipe_dir, template) + if os.path.sep not in template: + # this is just a template name in the recipe_templates folder + recipe_filepath = os.path.join(recipe_dir, template) + else: + recipe_filepath = template + template = json.load(open(recipe_filepath, 'r')) # generate recipe - recipe = autogen_recipe(filelist, outputdir, template=template) - - # process_recipe - run_recipe(recipe) + recipes = autogen_recipe(filelist, outputdir, template=template) - return recipe + # process recipe + if isinstance(recipes, list): + # if multiple recipes + for recipe in recipes: + run_recipe(recipe) + else: + # process single recipe + run_recipe(recipes) + return recipes def autogen_recipe(filelist, outputdir, template=None): """ - Automatically creates a recipe by identifyng and populating a template + Automatically creates a recipe (or recipes) by identifyng and populating a template. + Returns a single recipe unless there are multiple recipes that should be produced. 
Args: filelist (list of str): list of filepaths to files @@ -82,7 +98,7 @@ def autogen_recipe(filelist, outputdir, template=None): template (json): enables passing in of custom template, if desired Returns: - json: the JSON recipe to process the filelist + json list: the JSON recipe (or list of recipes) that the input filelist will be processed with """ # Handle the case where filelist is empty if not filelist: @@ -97,36 +113,56 @@ def autogen_recipe(filelist, outputdir, template=None): if template is None: recipe_filename = guess_template(dataset) - # load the template recipe - recipe_filepath = os.path.join(recipe_dir, recipe_filename) - template = json.load(open(recipe_filepath, 'r')) - - # create the personalized recipe - recipe = template.copy() - recipe["template"] = False - - for filename in filelist: - recipe["inputs"].append(filename) - - recipe["outputdir"] = outputdir - - ## Populate default values - ## This includes calibration files that need to be automatically determined - ## This also includes the dark subtraction outputdir for synthetic darks - this_caldb = caldb.CalDB() - for step in recipe["steps"]: - # by default, identify all the calibration files needed, unless jit setting is turned on - # two cases where we should be identifying the calibration recipes now - if "jit_calib_id" in recipe['drpconfig'] and (not recipe['drpconfig']["jit_calib_id"]): - _fill_in_calib_files(step, this_caldb, first_frame) - elif ("jit_calib_id" not in recipe['drpconfig']) and (not corgidrp.jit_calib_id): - _fill_in_calib_files(step, this_caldb, first_frame) - - if step["name"].lower() == "dark_subtraction": - if step["keywords"]["outputdir"].upper() == "AUTOMATIC": - step["keywords"]["outputdir"] = recipe["outputdir"] - - return recipe + # handle it as a list moving forward + if isinstance(recipe_filename, list): + recipe_filename_list = recipe_filename + else: + recipe_filename_list = [recipe_filename] + + recipe_template_list = [] + for recipe_filename in 
recipe_filename_list: + # load the template recipe + recipe_filepath = os.path.join(recipe_dir, recipe_filename) + template = json.load(open(recipe_filepath, 'r')) + recipe_template_list.append(template) + else: + # user passed in a single template + recipe_template_list = [template] + + recipe_list = [] + for template in recipe_template_list: + # create the personalized recipe + recipe = template.copy() + recipe["template"] = False + + for filename in filelist: + recipe["inputs"].append(filename) + + recipe["outputdir"] = outputdir + + ## Populate default values + ## This includes calibration files that need to be automatically determined + ## This also includes the dark subtraction outputdir for synthetic darks + this_caldb = caldb.CalDB() + for step in recipe["steps"]: + # by default, identify all the calibration files needed, unless jit setting is turned on + # two cases where we should be identifying the calibration recipes now + if "jit_calib_id" in recipe['drpconfig'] and (not recipe['drpconfig']["jit_calib_id"]): + _fill_in_calib_files(step, this_caldb, first_frame) + elif ("jit_calib_id" not in recipe['drpconfig']) and (not corgidrp.jit_calib_id): + _fill_in_calib_files(step, this_caldb, first_frame) + + if step["name"].lower() == "dark_subtraction": + if step["keywords"]["outputdir"].upper() == "AUTOMATIC": + step["keywords"]["outputdir"] = recipe["outputdir"] + + recipe_list.append(recipe) + + # if only a single recipe, return the recipe. 
otherwise return list + if len(recipe_list) > 1: + return recipe_list + else: + return recipe_list[0] def _fill_in_calib_files(step, this_caldb, ref_frame): """ @@ -183,37 +219,29 @@ def guess_template(dataset): dataset (corgidrp.data.Dataset): a Dataset to process Returns: - str: the best template filename + str or list: the best template filename or a list of multiple template filenames """ image = dataset[0] # first image for convenience if image.ext_hdr['DATA_LEVEL'] == "L1": - if image.pri_hdr['OBSTYPE'] == "ENG": + if 'VISTYPE' not in image.pri_hdr: + # this is probably IIT test data. Do generic processing + recipe_filename = "l1_to_l2b.json" + elif image.pri_hdr['VISTYPE'][:3] == "ENG": + # first three letters are ENG + # for either ENGPUPIL or ENGIMGAGE recipe_filename = "l1_to_l2a_eng.json" - elif image.pri_hdr['OBSTYPE'] == "ASTROM": + elif image.pri_hdr['VISTYPE'] == "BORESITE": recipe_filename = "l1_to_boresight.json" - elif image.pri_hdr['OBSTYPE'] == "FLT": + elif image.pri_hdr['VISTYPE'] == "FFIELD": recipe_filename = "l1_flat_and_bp.json" - elif image.pri_hdr['OBSTYPE'] == "NONLIN": - recipe_filename = "l1_to_l2a_nonlin.json" - elif image.pri_hdr['OBSTYPE'] == "KGAIN": - recipe_filename = "l1_to_kgain.json" - elif image.pri_hdr['OBSTYPE'] == "DARK": + elif image.pri_hdr['VISTYPE'] == "DARK": recipe_filename = "l1_to_l2a_noisemap.json" - elif image.pri_hdr['OBSTYPE'] == "MNFRAME": - # Disambiguate between NONLIN and KGAIN - for data in dataset: - if data.pri_hdr['OBSTYPE'] == "NONLIN": - recipe_filename = "l1_to_l2a_nonlin.json" - break - elif data.pri_hdr['OBSTYPE'] == "KGAIN": - recipe_filename = "l1_to_kgain.json" - break - else: - raise ValueError(f"Define recipe for {data.pri_hdr['OBSTYPE']}") + elif image.pri_hdr['VISTYPE'] == "PUPILIMG": + recipe_filename = ["l1_to_l2a_nonlin.json", "l1_to_kgain.json"] else: recipe_filename = "l1_to_l2b.json" elif image.ext_hdr['DATA_LEVEL'] == "L2a": - if image.pri_hdr['OBSTYPE'] == "DARK": + if 
image.pri_hdr['VISTYPE'] == "DARK": recipe_filename = "l2a_to_l2a_noisemap.json" else: raise NotImplementedError() diff --git a/setup.py b/setup.py index 9faead23..4a954c0b 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ def get_requires(): setup( name='corgidrp', - version="1.0", + version="1.1.2", description='(Roman Space Telescope) CORonaGraph Instrument Data Reduction Pipeline', #long_description="", #long_description_content_type="text/markdown", diff --git a/tests/e2e_tests/astrom_e2e.py b/tests/e2e_tests/astrom_e2e.py index e5ff3164..ff491ebd 100644 --- a/tests/e2e_tests/astrom_e2e.py +++ b/tests/e2e_tests/astrom_e2e.py @@ -55,7 +55,7 @@ def test_astrom_e2e(tvacdata_path, e2eoutput_path): for dark in os.listdir(noise_characterization_path): with fits.open(os.path.join(noise_characterization_path, dark)) as hdulist: dark_dat = hdulist[1].data - hdulist[0].header['OBSTYPE'] = "ASTROM" + hdulist[0].header['VISTYPE'] = "BORESITE" # setting SNR to ~250 (arbitrary SNR) scaled_image = ((250 * noise_rms) / np.max(image_sources[0].data)) * image_sources[0].data scaled_image = scaled_image.astype(type(dark_dat[0][0])) diff --git a/tests/e2e_tests/flatfield_e2e.py b/tests/e2e_tests/flatfield_e2e.py index 617970c1..dc27edd4 100644 --- a/tests/e2e_tests/flatfield_e2e.py +++ b/tests/e2e_tests/flatfield_e2e.py @@ -75,7 +75,7 @@ def test_flat_creation_neptune(tvacdata_path, e2eoutput_path): base_image = l1_dark_dataset[i % len(l1_dark_dataset)].copy() base_image.pri_hdr['TARGET'] = "Neptune" base_image.pri_hdr['FILTER'] = 4 - base_image.pri_hdr['OBSTYPE'] = "FLT" + base_image.pri_hdr['VISTYPE'] = "FFIELD" base_image.data = base_image.data.astype(float) base_image.filename = base_filename + "{0:010d}.fits".format(start_filenum+i) @@ -245,7 +245,7 @@ def test_flat_creation_uranus(tvacdata_path, e2eoutput_path): base_image = l1_dark_dataset[i % len(l1_dark_dataset)].copy() base_image.pri_hdr['TARGET'] = "Uranus" base_image.pri_hdr['FILTER'] = 1 - 
base_image.pri_hdr['OBSTYPE'] = "FLT" + base_image.pri_hdr['VISTYPE'] = "FFIELD" base_image.data = base_image.data.astype(float) base_image.filename = base_filename + "{0:010d}.fits".format(start_filenum+i) diff --git a/tests/e2e_tests/kgain_e2e.py b/tests/e2e_tests/kgain_e2e.py index 9f8386d1..78e46119 100644 --- a/tests/e2e_tests/kgain_e2e.py +++ b/tests/e2e_tests/kgain_e2e.py @@ -14,6 +14,8 @@ from cal.kgain.calibrate_kgain import calibrate_kgain import cal except: + # For tests to pass. Is it not necessary? See 'default_config_file' below + print('Install e2e dependencies with pip install -r requirements_e2etests.txt') pass thisfile_dir = os.path.dirname(__file__) # this file's folder @@ -38,6 +40,7 @@ def test_l1_to_kgain(tvacdata_path, e2eoutput_path): stack_arr2_f.append(file) with fits.open(file, mode='update') as hdus: try: + hdus[0].header['VISTYPE'] = 'PUPILIMG' hdus[0].header['OBSTYPE'] = 'MNFRAME' except: pass @@ -51,6 +54,7 @@ def test_l1_to_kgain(tvacdata_path, e2eoutput_path): stack_arr_f.append(file) with fits.open(file, mode='update') as hdus: try: + hdus[0].header['VISTYPE'] = 'PUPILIMG' hdus[0].header['OBSTYPE'] = 'KGAIN' except: pass @@ -103,8 +107,12 @@ def test_l1_to_kgain(tvacdata_path, e2eoutput_path): os.mkdir(kgain_outputdir) ####### Run the DRP walker + print('Running walker') walker.walk_corgidrp(ordered_filelist, "", kgain_outputdir, template="l1_to_kgain.json") - kgain_file = os.path.join(kgain_outputdir, os.path.split(ordered_filelist[0])[1][:-5]+'_kgain.fits') #"CGI_EXCAM_L1_0000051731_kgain.fits") + + ####### Load in the output data. It should be the latest kgain file produced. + possible_kgain_files = glob.glob(os.path.join(kgain_outputdir, '*_kgain.fits')) + kgain_file = max(possible_kgain_files, key=os.path.getmtime) # get the one most recently modified kgain = data.KGain(kgain_file) @@ -134,7 +142,7 @@ def test_l1_to_kgain(tvacdata_path, e2eoutput_path): # to edit the file. 
The arguments use the variables in this file as their # defaults allowing the use to edit the file if that is their preferred # workflow. - tvacdata_dir = '/Users/kevinludwick/Library/CloudStorage/Box-Box/CGI_TVAC_Data/Working_Folder' #"/home/schreiber/DataCopy/corgi/CGI_TVAC_Data/" + tvacdata_dir = '/home/jwang/Desktop/CGI_TVAC_Data/' outputdir = thisfile_dir ap = argparse.ArgumentParser(description="run the l1->kgain end-to-end test") diff --git a/tests/e2e_tests/noisemap_cal_e2e.py b/tests/e2e_tests/noisemap_cal_e2e.py index ea8f1ac8..916c782c 100644 --- a/tests/e2e_tests/noisemap_cal_e2e.py +++ b/tests/e2e_tests/noisemap_cal_e2e.py @@ -27,11 +27,11 @@ def set_obstype_for_darks( list_of_fits, ): - """ Adds proper values to OBSTYPE for the NoiseMap calibration: DARKS + """ Adds proper values to VISTYPE for the NoiseMap calibration: DARKS (data used to calibrate the dark noise sources). This function is unnecessary with future data because data will have - the proper values in OBSTYPE. + the proper values in VISTYPE. Args: list_of_fits (list): list of FITS files that need to be updated. 
@@ -41,7 +41,7 @@ def set_obstype_for_darks( for file in list_of_fits: fits_file = fits.open(file) prihdr = fits_file[0].header - prihdr['OBSTYPE'] = 'DARK' + prihdr['VISTYPE'] = 'DARK' # Update FITS file fits_file.writeto(file, overwrite=True) @@ -153,7 +153,7 @@ def test_noisemap_calibration_from_l1(tvacdata_path, e2eoutput_path): #Since the walker updates to L2a and the filename accordingly: output_filename = output_filenamel1.replace('L1','L2a',1) - # Update OBSTYPE to "DARKS" for DRP run + # Update VISTYPE to "DARK" for DRP run set_obstype_for_darks(stack_arr_files) ####### Run the DRP walker @@ -169,6 +169,7 @@ def test_noisemap_calibration_from_l1(tvacdata_path, e2eoutput_path): # iit_noisemap_fname = os.path.join(iit_noisemap_datadir,"iit_test_noisemaps.fits") corgidrp_noisemap = data.autoload(corgidrp_noisemap_fname) + this_caldb.remove_entry(corgidrp_noisemap) assert(np.nanmax(np.abs(corgidrp_noisemap.data[0]- F_map)) < 1e-11) assert(np.nanmax(np.abs(corgidrp_noisemap.data[1]- C_map)) < 1e-11) @@ -332,7 +333,7 @@ def test_noisemap_calibration_from_l2a(tvacdata_path, e2eoutput_path): kgain.save(filedir=noisemap_outputdir, filename="mock_kgain.fits") this_caldb.create_entry(kgain) - # Update OBSTYPE to "DARKS" for DRP run + # Update VISTPYE to "DARK" for DRP run set_obstype_for_darks(l2a_filepaths) ####### Run the DRP walker diff --git a/tests/e2e_tests/nonlin_e2e.py b/tests/e2e_tests/nonlin_e2e.py index 5c659406..c65f8ac1 100644 --- a/tests/e2e_tests/nonlin_e2e.py +++ b/tests/e2e_tests/nonlin_e2e.py @@ -16,82 +16,30 @@ thisfile_dir = os.path.dirname(__file__) # this file's folder - -def set_obstype_for_tvac( +def set_vistype_for_tvac( list_of_fits, ): - """ Adds proper values to OBSTYPE for the non-linearity calibration: NONLIN, - (data used to calibrate the non-linearity) or MNFRAME (data used to build - a mean frame). + """ Adds proper values to VISTYPE for non-linearity calibration. 
This function is unnecessary with future data because data will have - the proper values in OBSTYPE. The TVAC data used must be the + the proper values in VISTYPE. Hence, the "tvac" string in its name. + For reference, TVAC data used to calibrate non-linearity were the following 382 files with IDs: 51841-51870 (30: mean frame). And NL: 51731-51840 (110), 51941-51984 (44), 51986-52051 (66), 55122-55187 (66), 55191-55256 (66) Args: list_of_fits (list): list of FITS files that need to be updated. - """ - # Folder with files - nonlin_dir = list_of_fits[0][0:len(list_of_fits[0]) - list_of_fits[0][::-1].find('/')] - # TVAC files - tvac_file_0 = [ - 'CGI_EXCAM_L1_0000051841.fits', - 'CGI_EXCAM_L1_0000051731.fits', - 'CGI_EXCAM_L1_0000051941.fits', - 'CGI_EXCAM_L1_0000051986.fits', - 'CGI_EXCAM_L1_0000055122.fits', - 'CGI_EXCAM_L1_0000055191.fits', - ] - - n_files = [30, 110, 44, 66, 66, 66] - if len(tvac_file_0) != len(n_files): - raise ValueError(f'Inconsistent number of files{n_files} and stacks {len(tvac_file_0)}') - - for i_group, file in enumerate(tvac_file_0): - l1_number = int(file[file.find('L1_')+3:file.find('L1_')+13]) - print(f'Group of {n_files[i_group]} files starting with {file}') - for i_file in range(n_files[i_group]): - file_name = f'CGI_EXCAM_L1_00000{l1_number+i_file}.fits' - # Additional check - if np.any([nonlin_dir+file_name == file for file in list_of_fits]) is False: - raise IOError(f'The file {nonlin_dir+file} is not part of the calibration data') - fits_file = fits.open(nonlin_dir+file_name) - prihdr = fits_file[0].header - # Adjust OBSTYPE - if n_files[i_group] == 30: - prihdr['OBSTYPE'] = 'MNFRAME' - else: - prihdr['OBSTYPE'] = 'NONLIN' - # Update FITS file - fits_file.writeto(nonlin_dir+file_name, overwrite=True) - - -def get_first_nonlin_file( - list_of_fits, - ): - """ Returns the first FITS file with the NONLIN value on OBSTYPE in a list - of FITS files. 
- - Remember that FITS files used for NL calibration must have DATETIME in - ascending order. - - Args: - list_of_fits (list): list of FITS files that need to be updated. - - Returns: - first_fits_file (str): First FITS file with OBSTYPE set to NONLIN. - - """ - first_fits_file = 'NONLIN not found' + print("Adding VISTYPE='PUPILIMG' to TVAC data") for file in list_of_fits: fits_file = fits.open(file) - if fits_file[0].header['OBSTYPE'] == 'NONLIN': - first_fits_file = fits_file.filename() - break - return first_fits_file + prihdr = fits_file[0].header + # Adjust VISTYPE + prihdr['VISTYPE'] = 'PUPILIMG' + # Update FITS file + fits_file.writeto(file, overwrite=True) + @pytest.mark.e2e def test_nonlin_cal_e2e( @@ -115,7 +63,7 @@ def test_nonlin_cal_e2e( nonlin_l1_datadir = os.path.join(tvacdata_path, 'TV-20_EXCAM_noise_characterization', 'nonlin') tvac_caldir = os.path.join(tvacdata_path, 'TV-36_Coronagraphic_Data', 'Cals') - e2eoutput_path = os.path.join(e2eoutput_path, 'l1_to_l2a_output') + e2eoutput_path = os.path.join(e2eoutput_path, 'l1_to_nonlin_output') if not os.path.exists(nonlin_l1_datadir): raise FileNotFoundError('Please store L1 data used to calibrate non-linearity', @@ -132,9 +80,7 @@ def test_nonlin_cal_e2e( nonlin_l1_list.sort() # Set TVAC OBSTYPE to MNFRAME/NONLIN (flight data should have these values) - set_obstype_for_tvac(nonlin_l1_list) - - first_nonlin_file = get_first_nonlin_file(nonlin_l1_list) + set_vistype_for_tvac(nonlin_l1_list) # Non-linearity calibration file used to compare the output from CORGIDRP: # We are going to make a new nonlinear calibration file using @@ -154,7 +100,7 @@ def test_nonlin_cal_e2e( nonlinear_cal.save(filedir=e2eoutput_path, filename="nonlin_tvac.fits" ) - # KGain + # KGain kgain_val = 8.7 kgain = data.KGain(np.array([[kgain_val]]), pri_hdr=pri_hdr, ext_hdr=ext_hdr, input_dataset=mock_input_dataset) @@ -164,21 +110,18 @@ def test_nonlin_cal_e2e( # Run the walker on some test_data print('Running walker') - 
walker.walk_corgidrp(nonlin_l1_list, '', e2eoutput_path) + walker.walk_corgidrp(nonlin_l1_list, '', e2eoutput_path, "l1_to_l2a_nonlin.json") # Compare results print('Comparing the results with TVAC') # NL from CORGIDRP - nonlin_out_filename = first_nonlin_file[len(first_nonlin_file) - - first_nonlin_file[::-1].find(os.path.sep):] - if nonlin_out_filename.find('fits') == -1: - raise IOError('Data files must be FITS files') - nonlin_out_filename = nonlin_out_filename[0:nonlin_out_filename.find('fits')-1] - nonlin_out_filename += '_NonLinearityCalibration.fits' - nonlin_out = fits.open(os.path.join(e2eoutput_path, nonlin_out_filename)) - if nonlin_out[0].header['OBSTYPE'] != 'NONLIN': - raise ValueError('Calibration type is not NL') + possible_nonlin_files = glob.glob(os.path.join(e2eoutput_path, '*_NonLinearityCalibration.fits')) + nonlin_drp_filepath = max(possible_nonlin_files, key=os.path.getmtime) # get the one most recently modified + nonlin_drp_filename = nonlin_drp_filepath.split(os.path.sep)[-1] + + nonlin_out = fits.open(nonlin_drp_filepath) nonlin_out_table = nonlin_out[1].data + n_emgain = nonlin_out_table.shape[1] # NL from TVAC nonlin_tvac = fits.open(os.path.join(e2eoutput_path,'nonlin_tvac.fits')) @@ -186,9 +129,9 @@ def test_nonlin_cal_e2e( # Check if (nonlin_out_table.shape[0] != nonlin_tvac_table.shape[0] or - nonlin_out_table.shape[1] != nonlin_tvac_table.shape[1]): + n_emgain != nonlin_tvac_table.shape[1]): raise ValueError('Non-linearity table from CORGI DRP has a different', - 'format than the one from TVAC') + 'format than the one from TVAC') rel_out_tvac_perc = 100*(nonlin_out_table[1:,1:]/nonlin_tvac_table[1:,1:]-1) @@ -203,17 +146,17 @@ def test_nonlin_cal_e2e( fontsize=14) plt.legend() plt.grid() - plt.savefig(os.path.join(e2eoutput_path,nonlin_out_filename[:-5])) + plt.savefig(os.path.join(e2eoutput_path,nonlin_drp_filename[:-5]+".png")) print(f'NL differences wrt ENG/TVAC delivered code ({nonlin_table_from_eng}): ' + 
f'max={np.abs(rel_out_tvac_perc).max():1.1e} %, ' + f'rms={np.std(rel_out_tvac_perc):1.1e} %') - print(f'Figure saved: {os.path.join(e2eoutput_path,nonlin_out_filename[:-5])}.png') + print(f'Figure saved: {os.path.join(e2eoutput_path,nonlin_drp_filename[:-5])}.png') # Set a quantitative test for the comparison assert np.less(np.abs(rel_out_tvac_perc).max(), 1e-4) # remove entry from caldb - nonlin_entry = data.NonLinearityCalibration(os.path.join(e2eoutput_path, nonlin_out_filename)) + nonlin_entry = data.NonLinearityCalibration(os.path.join(e2eoutput_path, nonlin_drp_filename)) this_caldb.remove_entry(nonlin_entry) this_caldb.remove_entry(kgain) # Print success message @@ -227,7 +170,7 @@ def test_nonlin_cal_e2e( # defaults allowing the use to edit the file if that is their preferred # workflow. - TVACDATA_DIR = "/Users/srhildeb/Documents/GitHub/CGI_TVAC_Data/" + TVACDATA_DIR = '/home/jwang/Desktop/CGI_TVAC_Data/' OUTPUT_DIR = thisfile_dir ap = argparse.ArgumentParser(description="run the non-linearity end-to-end test") diff --git a/tests/e2e_tests/nonlin_kgain_e2e.py b/tests/e2e_tests/nonlin_kgain_e2e.py new file mode 100644 index 00000000..fd3a070a --- /dev/null +++ b/tests/e2e_tests/nonlin_kgain_e2e.py @@ -0,0 +1,137 @@ +""" Module to test the generation of both nonlin and kgain from the same PUPILIMG dataset """ +import os +import glob +import argparse +import pytest +import numpy as np +from astropy import time +from astropy.io import fits +import matplotlib.pyplot as plt + +import corgidrp +from corgidrp import data +from corgidrp import mocks +from corgidrp import walker +from corgidrp import caldb + +thisfile_dir = os.path.dirname(__file__) # this file's folder + +def set_vistype_for_tvac( + list_of_fits, + ): + """ Adds proper values to VISTYPE for non-linearity calibration. + + This function is unnecessary with future data because data will have + the proper values in VISTYPE. Hence, the "tvac" string in its name. 
+ + Args: + list_of_fits (list): list of FITS files that need to be updated. + """ + print("Adding VISTYPE='PUPILIMG' to TVAC data") + for file in list_of_fits: + fits_file = fits.open(file) + prihdr = fits_file[0].header + # Adjust VISTYPE + prihdr['VISTYPE'] = 'PUPILIMG' + # Update FITS file + fits_file.writeto(file, overwrite=True) + + +@pytest.mark.e2e +def test_nonlin_and_kgain_e2e( + tvacdata_path, + e2eoutput_path, + ): + """ + Performs the e2e test to generate both nonlin and kgain calibrations from the same + L1 pupilimg dataset + + Args: + tvacdata_path (str): Location of L1 data. Folders for both kgain and nonlin + e2eoutput_path (str): Location of the output products: recipes, non-linearity + calibration FITS file, and kgain fits file + + """ + + # figure out paths, assuming everything is located in the same relative location + nonlin_l1_datadir = os.path.join(tvacdata_path, + 'TV-20_EXCAM_noise_characterization', 'nonlin') + kgain_l1_datadir = os.path.join(tvacdata_path, + 'TV-20_EXCAM_noise_characterization', 'kgain') + + e2eoutput_path = os.path.join(e2eoutput_path, 'nonlin_and_kgain_output') + + if not os.path.exists(nonlin_l1_datadir): + raise FileNotFoundError('Please store L1 data used to calibrate non-linearity', + f'in {nonlin_l1_datadir}') + if not os.path.exists(kgain_l1_datadir): + raise FileNotFoundError('Please store L1 data used to calibrate kgain', + f'in {kgain_l1_datadir}') + + if not os.path.exists(e2eoutput_path): + os.mkdir(e2eoutput_path) + + # Define the raw science data to process + nonlin_l1_list = glob.glob(os.path.join(nonlin_l1_datadir, "*.fits")) + nonlin_l1_list.sort() + kgain_l1_list = glob.glob(os.path.join(kgain_l1_datadir, "*.fits")) + kgain_l1_list.sort() + + # both kgain and nonlin dirs have the same MNFRAME files + # only add the files from the kgain list that don't share the same filename + # grab filenames for l1 + nonlin_l1_filenames = [filepath.split(os.path.sep)[-1] for filepath in nonlin_l1_list] + 
pupilimg_l1_list = nonlin_l1_list # start with the nonlin filelist + # iterate through kgain filelist to find ones that don't share the same filename + for filepath in kgain_l1_list: + filename = filepath.split(os.path.sep)[-1] + if filename not in nonlin_l1_filenames: + pupilimg_l1_list.append(filepath) + + + # Set TVAC data to have VISTYPE=PUPILIMG (flight data should have these values) + set_vistype_for_tvac(pupilimg_l1_list) + + + # Run the walker on some test_data + print('Running walker') + walker.walk_corgidrp(pupilimg_l1_list, '', e2eoutput_path) + + # check that files can be loaded from disk successfully. no need to check correctness as done in other e2e tests + # NL from CORGIDRP + possible_nonlin_files = glob.glob(os.path.join(e2eoutput_path, '*_NonLinearityCalibration.fits')) + nonlin_drp_filepath = max(possible_nonlin_files, key=os.path.getmtime) # get the one most recently modified + nonlin = data.NonLinearityCalibration(nonlin_drp_filepath) + + # kgain from corgidrp + possible_kgain_files = glob.glob(os.path.join(e2eoutput_path, '*_kgain.fits')) + kgain_filepath = max(possible_kgain_files, key=os.path.getmtime) # get the one most recently modified + kgain = data.KGain(kgain_filepath) + + # remove entry from caldb + this_caldb = caldb.CalDB() + this_caldb.remove_entry(nonlin) + this_caldb.remove_entry(kgain) + + # Print success message + print('e2e test for NL passed') + +if __name__ == "__main__": + + # Use arguments to run the test. Users can then write their own scripts + # that call this script with the correct arguments and they do not need + # to edit the file. The arguments use the variables in this file as their + # defaults allowing the use to edit the file if that is their preferred + # workflow. 
+ + TVACDATA_DIR = '/home/jwang/Desktop/CGI_TVAC_Data/' + OUTPUT_DIR = thisfile_dir + + ap = argparse.ArgumentParser(description="run the non-linearity end-to-end test") + ap.add_argument("-tvac", "--tvacdata_dir", default=TVACDATA_DIR, + help="Path to CGI_TVAC_Data Folder [%(default)s]") + ap.add_argument("-o", "--output_dir", default=OUTPUT_DIR, + help="directory to write results to [%(default)s]") + args = ap.parse_args() + # Run the e2e test + test_nonlin_and_kgain_e2e(args.tvacdata_dir, args.output_dir) diff --git a/tests/test_combine.py b/tests/test_combine.py index e6ed9faf..306b7131 100644 --- a/tests/test_combine.py +++ b/tests/test_combine.py @@ -61,6 +61,37 @@ def test_mean_combine_subexposures(): assert combined_dataset_2[0].ext_hdr['FILE2'] in ["2.fits", "1.fits", "3.fits", "4.fits"] assert combined_dataset_2[0].ext_hdr['FILE3'] in ["2.fits", "1.fits", "3.fits", "4.fits"] +def test_mean_combine_subexposures_without_scaling(): + """ + Test mean combine of subexposures for case where num_frames_scaling=False + """ + + image1 = data.Image(img1, err=err1, dq=dq, pri_hdr = prhd, ext_hdr = exthd) + image1.filename = "1.fits" + image2 = image1.copy() + image2.filename = "2.fits" + image3 = image1.copy() + image3.filename = "3.fits" + image4 = image1.copy() + image4.filename = "4.fits" + + dataset = data.Dataset([image1, image2, image3, image4]) + + combined_dataset = combine.combine_subexposures(dataset, 2, num_frames_scaling=False) + + # Check that data and error values are not scaled up + assert(len(combined_dataset) == 2) + assert(np.all(combined_dataset[0].data == 1)) + assert(np.all(combined_dataset[0].err == pytest.approx(1/np.sqrt(2)))) + assert(np.all(combined_dataset[0].dq == 0)) + + # combine again + combined_dataset_2 = combine.combine_subexposures(combined_dataset, 2, num_frames_scaling=False) + + assert(len(combined_dataset_2) == 1) + assert(np.all(combined_dataset_2[0].data == 1)) + assert(np.all(combined_dataset_2[0].err == 
pytest.approx((1/np.sqrt(2))/np.sqrt(2)))) + assert(np.all(combined_dataset_2[0].dq == 0)) def test_mean_combine_subexposures_with_bad(): """ @@ -91,9 +122,11 @@ def test_mean_combine_subexposures_with_bad(): # the pixel with one bad pixel should have same value but higher error assert combined_dataset[0].data[0][0] == 2 assert combined_dataset[0].err[0][0][0] == pytest.approx(2) + assert combined_dataset[0].dq[0][0] == 0 # 0 because one of the two frames had a good value # compare against a pixel without any bad pixels assert combined_dataset[1].data[0][0] == 2 assert combined_dataset[1].err[0][0][0] == pytest.approx(np.sqrt(2)) + assert combined_dataset[1].dq[0][0] == 0 # the pixel with two bad pixels should be nan assert np.isnan(combined_dataset[0].data[0][1]) @@ -102,7 +135,7 @@ def test_mean_combine_subexposures_with_bad(): def test_median_combine_subexposures(): """ - Test median combine of subexposures. And tests defualt case wihere num_frames_per_group isn't specified. + Test median combine of subexposures. And tests default case where num_frames_per_group isn't specified. 
""" image1 = data.Image(img1, err=err1, dq=dq, pri_hdr = prhd, ext_hdr = exthd) @@ -123,6 +156,153 @@ def test_median_combine_subexposures(): assert(np.all(combined_dataset[0].err == pytest.approx(np.sqrt(2*np.pi)))) assert(np.all(combined_dataset[0].dq == 0)) +def test_median_combine_subexposures_with_bad(): + """ + Test median combine of subexposures with bad pixels over multiple combinations + """ + # use copies since we are going to modify their values + image1 = data.Image(np.copy(img1), err=np.copy(err1), dq=np.copy(dq), + pri_hdr = prhd, ext_hdr = exthd) + image1.filename = "1.fits" + image2 = image1.copy() + image2.filename = "2.fits" + image3 = image1.copy() + image3.filename = "3.fits" + image4 = image1.copy() + image4.filename = "4.fits" + + # (0,0) has one bad frame + image1.dq[0][0] = 1 + # (0,1) has both pixels bad + image1.dq[0][1] = 1 + image2.dq[0][1] = 1 + + dataset = data.Dataset([image1, image2, image3, image4]) + + combined_dataset = combine.combine_subexposures(dataset, 2, collapse="median") + + assert(len(combined_dataset) == 2) + # the pixel with one bad pixel should have same value but higher error. In both cases the error should be inflated by np.sqrt(np.pi/2) compared to mean error. 
+ assert combined_dataset[0].data[0][0] == 2 + assert combined_dataset[0].err[0][0][0] == pytest.approx(2 * np.sqrt(np.pi/2)) + assert combined_dataset[0].dq[0][0] == 0 # 0 because one of the two frames had a good value + # compare against a pixel without any bad pixels + assert combined_dataset[1].data[0][0] == 2 + assert combined_dataset[1].err[0][0][0] == pytest.approx(np.sqrt(2) * np.sqrt(np.pi/2)) + assert combined_dataset[1].dq[0][0] == 0 + + # the pixel with two bad pixels should be nan + assert np.isnan(combined_dataset[0].data[0][1]) + assert np.isnan(combined_dataset[0].err[0][0][1]) + assert combined_dataset[0].dq[0][1] == 1 + + # combine again + combined_dataset_2 = combine.combine_subexposures(combined_dataset, 2, collapse="median") + + assert(len(combined_dataset_2) == 1) + assert(np.all(combined_dataset_2[0].data == 4)) + + # error for pixel with no bad pixels in original data (i.e. most pixels in data) + assert combined_dataset_2[0].err[0][5][0] == pytest.approx(np.pi) + + # error for pixel with one bad pixel in original data (i.e. no nans after first combination) + assert combined_dataset_2[0].err[0][0][0] == pytest.approx(0.5 * np.pi * np.sqrt(6)) + + # error for pixel with two bad pixels in original data (i.e. 1 nan after first combination) + assert combined_dataset_2[0].err[0][0][1] == pytest.approx(np.pi * np.sqrt(2)) + + assert(np.all(combined_dataset_2[0].dq == 0)) + +def test_combine_different_values(): + """ + Test whether the function correctly combines different values. + """ + + # use copies since we are going to modify their values + image1 = data.Image(np.copy(img1), err=np.copy(err1), dq=np.copy(dq), + pri_hdr = prhd, ext_hdr = exthd) + image1.filename = "1.fits" + image2 = image1.copy() + image2.filename = "2.fits" + image3 = image1.copy() + image3.filename = "3.fits" + image4 = image1.copy() + image4.filename = "4.fits" + + # (0,0) has different values in each frame. Some are bad pixels. 
+ image1.data[0][0] = 5 + image2.data[0][0] = 6 + image3.data[0][0] = 9 + image4.data[0][0] = 19 + + image2.dq[0][0] = 1 + image4.dq[0][0] = 1 + + # (0,1) is a bad pixel in every frame + image1.dq[0][1] = 1 + image2.dq[0][1] = 1 + image3.dq[0][1] = 1 + image4.dq[0][1] = 1 + + dataset = data.Dataset([image1, image2, image3, image4]) + + combined_dataset = combine.combine_subexposures(dataset, collapse="median") + + assert(len(combined_dataset) == 1) + + # Most pixels had good values of 1 in all frames + assert combined_dataset[0].data[0][2] == 4 + assert combined_dataset[0].err[0][0][2] == pytest.approx(2*np.sqrt(np.pi/2)) + + # (0,0) has a different median value calculated ignoring nans + assert combined_dataset[0].data[0][0] == 7 * 4 # median value scaled by number of images + assert combined_dataset[0].err[0][0][0] == pytest.approx(2 * np.sqrt(np.pi)) + + # (0,1) is a nan + assert np.isnan(combined_dataset[0].data[0][1]) + assert np.isnan(combined_dataset[0].err[0][0][1]) + + # the updated bad pixel map only contains one bad pixel (i.e. the pixel for which there were no good values) + assert combined_dataset[0].dq[0][0] == 0 + assert combined_dataset[0].dq[0][1] == 1 + +def test_not_divisible(): + """ + Tests that function correctly fails when the length of the dataset is not divisible by num_frames_per_group. + """ + + image1 = data.Image(img1, err=err1, dq=dq, pri_hdr = prhd, ext_hdr = exthd) + image1.filename = "1.fits" + image2 = image1.copy() + image2.filename = "2.fits" + image3 = image1.copy() + image3.filename = "3.fits" + image4 = image1.copy() + image4.filename = "4.fits" + + dataset = data.Dataset([image1, image2, image3, image4]) + + with pytest.raises(ValueError): + combined_dataset = combine.combine_subexposures(dataset, 3) # Should fail as 4 % 3 != 0 + +def test_invalid_collapse(): + """ + Tests that function correctly fails when collapse type is not valid. 
+ """ + + image1 = data.Image(img1, err=err1, dq=dq, pri_hdr = prhd, ext_hdr = exthd) + image1.filename = "1.fits" + image2 = image1.copy() + image2.filename = "2.fits" + image3 = image1.copy() + image3.filename = "3.fits" + image4 = image1.copy() + image4.filename = "4.fits" + + dataset = data.Dataset([image1, image2, image3, image4]) + + with pytest.raises(ValueError): + combined_dataset = combine.combine_subexposures(dataset, collapse="invalid_option") if __name__ == "__main__": test_mean_combine_subexposures() \ No newline at end of file diff --git a/tests/test_crop.py b/tests/test_crop.py new file mode 100644 index 00000000..9630307e --- /dev/null +++ b/tests/test_crop.py @@ -0,0 +1,220 @@ +import numpy as np +import pytest +from corgidrp.data import Dataset, Image +from corgidrp.l3_to_l4 import crop +from corgidrp.mocks import create_default_headers + +def make_test_dataset(shape=[100,100],centxy=None): + """ + Make 2D or 3D test data. + + Args: + shape (arraylike, optional): data shape. Defaults to [100,100]. + centxy (arraylike,optional): location of 4 pixel dot. Defaults to center of array. + + Returns: + corgidrp.data.Dataset: test data with a 2x2 "PSF" at location centxy. 
+ """ + shape = np.array(shape) + + test_arr = np.zeros(shape) + if centxy is None: + cent = np.array(shape)/2 - 0.5 + else: + cent = [centxy[-i] for i in np.array(range(len(centxy)))+1] + + prihdr,exthdr = create_default_headers() + exthdr['STARLOCX'] = cent[1] + exthdr['STARLOCY'] = cent[0] + exthdr['MASKLOCX'] = cent[1] + exthdr['MASKLOCY'] = cent[0] + exthdr['CRPIX1'] = cent[1] + 1 + exthdr['CRPIX2'] = cent[0] + 1 + prihdr['MODE'] = 'HLC' + + if len(shape) == 2: + test_arr[int(cent[0]-0.5):int(cent[0]+1.5),int(cent[1]-0.5):int(cent[1]+1.5)] = 1 + + elif len(shape) == 3: + test_arr[:,int(cent[0]-0.5):int(cent[0]+1.5),int(cent[1]-0.5):int(cent[1]+1.5)] = 1 + + test_dataset = Dataset([Image(test_arr,prihdr,exthdr)]) + + return test_dataset + +goal_arr = np.zeros((10,10)) +goal_arr[4:6,4:6] = 1 + +goal_rect_arr = np.zeros((10,20)) +goal_rect_arr[4:6,9:11] = 1 + +def test_2d_square_center_crop(): + """ Test cropping to the center of a square using the header keywords "STARLOCX/Y". + """ + + test_dataset = make_test_dataset(shape=[100,100],centxy=[49.5,49.5]) + cropped_test_dataset = crop(test_dataset,sizexy=10,centerxy=None) + + if not cropped_test_dataset[0].data == pytest.approx(goal_arr): + raise Exception("Unexpected result for 2D square crop test.") + +def test_manual_center_crop(): + """ Test overriding crop location using centerxy argument. + """ + + test_dataset = make_test_dataset(shape=[100,100],centxy=[49.5,49.5]) + cropped_test_dataset = crop(test_dataset,sizexy=10,centerxy=[50.5,50.5]) + + offset_goal_arr = np.zeros((10,10)) + offset_goal_arr[3:5,3:5] = 1 + + if not cropped_test_dataset[0].data == pytest.approx(offset_goal_arr): + raise Exception("Unexpected result for manual crop test.") + +def test_2d_square_offcenter_crop(): + """ Test cropping off-center square data. 
+ """ + + test_dataset = make_test_dataset(shape=[100,100],centxy=[24.5,49.5]) + cropped_test_dataset = crop(test_dataset,sizexy=10,centerxy=None) + + if not cropped_test_dataset[0].data == pytest.approx(goal_arr): + raise Exception("Unexpected result for 2D square offcenter crop test.") + +def test_2d_rect_offcenter_crop(): + """ Test cropping off-center non-square data. + """ + test_dataset = make_test_dataset(shape=[100,40],centxy=[24.5,49.5]) + cropped_test_dataset = crop(test_dataset,sizexy=[20,10],centerxy=None) + + if not cropped_test_dataset[0].data == pytest.approx(goal_rect_arr): + raise Exception("Unexpected result for 2D rect offcenter crop test.") + +def test_3d_rect_offcenter_crop(): + """ Test cropping 3D off-center non-square data. + """ + test_dataset = make_test_dataset(shape=[3,100,40],centxy=[24.5,49.5]) + cropped_test_dataset = crop(test_dataset,sizexy=[20,10],centerxy=None) + + goal_rect_arr3d = np.array([goal_rect_arr,goal_rect_arr,goal_rect_arr]) + + if not cropped_test_dataset[0].data == pytest.approx(goal_rect_arr3d): + raise Exception("Unexpected result for 2D rect offcenter crop test.") + + +def test_edge_of_FOV(): + """ Test cropping right at the edge of the data array. + """ + test_dataset = make_test_dataset(shape=[100,100],centxy=[94.5,94.5]) + cropped_test_dataset = crop(test_dataset,sizexy=10,centerxy=None) + + if not cropped_test_dataset[0].data == pytest.approx(goal_arr): + raise Exception("Unexpected result for edge of FOV crop test.") + +def test_outside_FOV(): + """ Test cropping over the edge of the data array. + """ + + test_dataset = make_test_dataset(shape=[100,100],centxy=[95.5,95.5]) + + with pytest.raises(ValueError): + _ = crop(test_dataset,sizexy=10,centerxy=None) + +def test_nonhalfinteger_centxy(): + """ Test trying to center the crop not on a pixel intersection. 
+ """ + test_dataset = make_test_dataset(shape=[100,100],centxy=[49.5,49.5]) + cropped_test_dataset = crop(test_dataset,sizexy=10,centerxy=[49.7,49.7]) + + if not cropped_test_dataset[0].data == pytest.approx(goal_arr): + raise Exception("Unexpected result for non half-integer crop test.") + +def test_header_updates_2d(): + """ Test that the header values are updated correctly. + """ + + test_dataset = make_test_dataset(shape=[100,100],centxy=[49.5,49.5]) + test_dataset[0].ext_hdr["MASKLOCX"] = 49.5 + test_dataset[0].ext_hdr["MASKLOCY"] = 49.5 + test_dataset[0].pri_hdr["CRPIX1"] = 50.5 + test_dataset[0].pri_hdr["CRPIX2"] = 50.5 + + cropped_test_dataset = crop(test_dataset,sizexy=10,centerxy=None) + + if not cropped_test_dataset[0].ext_hdr["STARLOCX"] == 4.5: + raise Exception("Frame header kw STARLOCX not updated correctly.") + if not cropped_test_dataset[0].ext_hdr["STARLOCY"] == 4.5: + raise Exception("Frame header kw STARLOCY not updated correctly.") + if not cropped_test_dataset[0].ext_hdr["MASKLOCX"] == 4.5: + raise Exception("Frame header kw MASKLOCX not updated correctly.") + if not cropped_test_dataset[0].ext_hdr["MASKLOCY"] == 4.5: + raise Exception("Frame header kw MASKLOCY not updated correctly.") + if not cropped_test_dataset[0].pri_hdr["CRPIX1"] == 5.5: + raise Exception("Frame header kw CRPIX1 not updated correctly.") + if not cropped_test_dataset[0].pri_hdr["CRPIX2"] == 5.5: + raise Exception("Frame header kw CRPIX2 not updated correctly.") + if not cropped_test_dataset[0].ext_hdr["NAXIS1"] == 10: + raise Exception("Frame header kw NAXIS1 not updated correctly.") + if not cropped_test_dataset[0].ext_hdr["NAXIS2"] == 10: + raise Exception("Frame header kw NAXIS2 not updated correctly.") + if not cropped_test_dataset[0].err_hdr["NAXIS1"] == 10: + raise Exception("Frame err header kw NAXIS1 not updated correctly.") + if not cropped_test_dataset[0].err_hdr["NAXIS2"] == 10: + raise Exception("Frame err header kw NAXIS2 not updated correctly.") + if not 
cropped_test_dataset[0].dq_hdr["NAXIS1"] == 10: + raise Exception("Frame dq header kw NAXIS1 not updated correctly.") + if not cropped_test_dataset[0].dq_hdr["NAXIS2"] == 10: + raise Exception("Frame dq header kw NAXIS2 not updated correctly.") + +def test_header_updates_3d(): + """ Test that the header values are updated correctly. + """ + + test_dataset = make_test_dataset(shape=[3,100,100],centxy=[49.5,49.5]) + test_dataset[0].ext_hdr["MASKLOCX"] = 49.5 + test_dataset[0].ext_hdr["MASKLOCY"] = 49.5 + test_dataset[0].pri_hdr["CRPIX1"] = 50.5 + test_dataset[0].pri_hdr["CRPIX2"] = 50.5 + + cropped_test_dataset = crop(test_dataset,sizexy=10,centerxy=None) + + if not cropped_test_dataset[0].ext_hdr["STARLOCX"] == 4.5: + raise Exception("Frame header kw STARLOCX not updated correctly.") + if not cropped_test_dataset[0].ext_hdr["STARLOCY"] == 4.5: + raise Exception("Frame header kw STARLOCY not updated correctly.") + if not cropped_test_dataset[0].ext_hdr["MASKLOCX"] == 4.5: + raise Exception("Frame header kw MASKLOCX not updated correctly.") + if not cropped_test_dataset[0].ext_hdr["MASKLOCY"] == 4.5: + raise Exception("Frame header kw MASKLOCY not updated correctly.") + if not cropped_test_dataset[0].pri_hdr["CRPIX1"] == 5.5: + raise Exception("Frame header kw CRPIX1 not updated correctly.") + if not cropped_test_dataset[0].pri_hdr["CRPIX2"] == 5.5: + raise Exception("Frame header kw CRPIX2 not updated correctly.") + if not cropped_test_dataset[0].ext_hdr["NAXIS1"] == 10: + raise Exception("Frame header kw NAXIS1 not updated correctly.") + if not cropped_test_dataset[0].ext_hdr["NAXIS2"] == 10: + raise Exception("Frame header kw NAXIS2 not updated correctly.") + if not cropped_test_dataset[0].ext_hdr["NAXIS3"] == 3: + raise Exception("Frame header kw NAXIS3 not updated correctly.") + if not cropped_test_dataset[0].dq_hdr["NAXIS1"] == 10: + raise Exception("Frame dq header kw NAXIS1 not updated correctly.") + if not cropped_test_dataset[0].dq_hdr["NAXIS2"] == 10: + 
raise Exception("Frame dq header kw NAXIS2 not updated correctly.") + if not cropped_test_dataset[0].dq_hdr["NAXIS3"] == 3: + raise Exception("Frame dq header kw NAXIS3 not updated correctly.") + if not cropped_test_dataset[0].err_hdr["NAXIS1"] == 10: + raise Exception("Frame err header kw NAXIS1 not updated correctly.") + if not cropped_test_dataset[0].err_hdr["NAXIS2"] == 10: + raise Exception("Frame err header kw NAXIS2 not updated correctly.") + if not cropped_test_dataset[0].err_hdr["NAXIS3"] == 3: + raise Exception("Frame err header kw NAXIS3 not updated correctly.") + +if __name__ == "__main__": + test_2d_square_center_crop() + test_2d_square_offcenter_crop() + test_2d_rect_offcenter_crop() + test_edge_of_FOV() + test_outside_FOV() + test_nonhalfinteger_centxy() + test_header_updates_2d() + test_header_updates_3d() diff --git a/tests/test_data/bd_75d325_stis_006.fits b/tests/test_data/bd_75d325_stis_006.fits new file mode 100644 index 00000000..b0627aa2 --- /dev/null +++ b/tests/test_data/bd_75d325_stis_006.fits @@ -0,0 +1,232 @@ +SIMPLE = T / BITPIX = 16 / NAXIS = 0 / EXTEND = T /FITS extensions present? TARGETID= 'BD75 ' / DBTABLE = 'CRSPECTRUM' / MAPKEY = 'calspec ' / AIRMASS = 0.00000 /Mean airmass of the observation DESCRIP = 'Standard star flux with an HST/STIS calibration------' / SOURCE = 'Flux scale of Bohlin, et al.2020, AJ, 160, 21' / USEAFTER= 'Jan 01 2000 00:00:00' / COMMENT = 'HST Flux scale is based on TMAP AND TLUSTY WD NLTE MODELS' / PEDIGREE= 'INFLIGHT 1997 to 2022' / HISTORY FILE WRITTEN BY stismrg.PRO ON 23-Sep-2022 08:45:55.00 HISTORY FILE WRITTEN BY STISREDUCE.PRO ON 23-Sep-2022 08:45:54.00 HISTORY coadd lst for G230LB from dir=dat/: HISTORY o4a506010 o3wy02010 oa8b01040 oa8b010k0 oa8b11030 HISTORY EPOCH: 1997.138:16:45:45-2009.215:06:37:35 HISTORY gwidth for G230LB flux cal=11 w/ /Users/bohlin/stisidl/scal/sens11_g230lHISTORY SYS-ERROR is the broadband ~1% SYSTEMATIC UNCERTAINTY of STIS fluxes. HISTORY Bohlin(2014,PASP,126,711). 
BOTH THE STAT-ERR AND SYS-ERR ARE 1-SIGMA.HISTORY Net & Flux corr for time(2019,AJ,158,211)&CTE loss(2022, CTE update, in HISTORY coadd lst for G430L from dir=dat/: HISTORY o3wy02030 o4a5050k0 oeju01010 oeju01020 oeju02010 oeju02020 oeju03010 HISTORY oeju03020 HISTORY EPOCH: 1997.138:16:58:53-2022.103:07:47:15 HISTORY gwidth for G430L flux cal=11 w/ /Users/bohlin/stisidl/scal/sens11_g430l.HISTORY Net and Flux corr for time(2019,AJ,158,211)&CTE loss(2022, CTE update, iHISTORY MERGE POINT = 3065.0 HISTORY coadd lst for G750L from dir=dat/: HISTORY o4a505010 o49x03020 oeju01070 oeju02070 oeju03070 HISTORY EPOCH: 1997.268:20:55:21-2022.103:08:44:47 HISTORY gwidth for G750L flux cal=11 w/ /Users/bohlin/stisidl/scal/sens11_g750l.HISTORY MERGE POINT = 5450.0 HISTORY HISTORY HISTORY HISTORY Units: Angstroms(A) and erg s-1 cm-2 A-1 HISTORY Written by MAKE_STIS_CALSPEC.pro 25-Oct-2022 HISTORY Sources for this spectrum: HISTORY ---------------- ---------------------- ---------- HISTORY WAVELENGTH RANGE SOURCE FILE HISTORY ---------------- ---------------------- ---------- HISTORY 1140 1785 FOS BLUE BD_75D325_FOS_003 HISTORY 1785 10237 STIS bd_75d325.mrg HISTORY All wavelengths are in vacuum w/ model adjusted for radial vel= -54.0 HISTORY CHANGES from previous version: HISTORY CTE update: STIS ISR 2022-07 HISTORY For details see: HISTORY http://www.stsci.edu/hst/instrumentation/reference- HISTORY data-for-calibration-and-tools/astronomical- HISTORY catalogs/calspec FILENAME= 'bd_75d325_stis_006.fits' / WMIN = 1140.59997559 /Minimum Wavelength WMAX = 10237.6640620 /Maximum Wavelength END XTENSION= 'BINTABLE' /Written by IDL: Tue Oct 25 10:50:06 2022 BITPIX = 8 / NAXIS = 2 /Binary table NAXIS1 = 30 /Number of bytes per row NAXIS2 = 3359 /Number of rows PCOUNT = 0 /Random parameter count GCOUNT = 1 /Group count TFIELDS = 7 /Number of columns EXTNAME = 'SCI ' / EXTVER = 1 / INHERIT = T / TFORM1 = '1D ' /Real*8 (double precision) TTYPE1 = 'WAVELENGTH' /Label for column 1 TUNIT1 = 
'ANGSTROMS' /Units of column 1 TDISP1 = 'G10.4 ' /Display format for column 1 TFORM2 = '1E ' /Real*4 (floating point) TTYPE2 = 'FLUX ' /Absolutely calibrated net spectrum TUNIT2 = 'FLAM ' /Units of column 2 TDISP2 = 'E12.4 ' /Display format for column 2 TFORM3 = '1E ' /Real*4 (floating point) TTYPE3 = 'STATERROR' /Statistical flux error TUNIT3 = 'FLAM ' /Units of column 3 TDISP3 = 'E12.4 ' /Display format for column 3 TFORM4 = '1E ' /Real*4 (floating point) TTYPE4 = 'SYSERROR' /Systematic flux error=0.01*FLAM TUNIT4 = 'FLAM ' /Units of column 4 TDISP4 = 'E12.4 ' /Display format for column 4 TFORM5 = '1E ' /Real*4 (floating point) TTYPE5 = 'FWHM ' /FWHM spectral resolution TUNIT5 = 'ANGSTROMS' /Units of column 5 TDISP5 = 'G6.2 ' /Display format for column 5 TFORM6 = '1I ' /Integer*2 (short integer) TTYPE6 = 'DATAQUAL' /Data quality: 1=good, 0=bad TUNIT6 = 'NONE ' /Units of column 6 TDISP6 = 'I2 ' /Display format for column 6 TFORM7 = '1E ' /Real*4 (floating point) TTYPE7 = 'TOTEXP ' /Total exposure time TUNIT7 = 'SEC ' /Units of column 7 TDISP7 = 'G10.2 ' /Display format for column 7 END @f`.7E,,&|,?@f`.,,+=?@f`.Y_,@,%?@f`.[,*-,?@f`.,,r?@f`.[,,`,VU?@f`.U,8,'4,?@f`.p%,Z,?@f`.,,E?@.,O,c?h@.p,#,ձ?@.8',t,/?@.<,,!?@.,u,?@ +.߭/,o5,ð?@.ݻv,g,z?@@.s,`?,?4@.-,W,w?4@.;,P:a, 2+?@.p,L#+,?@".H,^_/,?@&.;8,_,H?@*.Р,XC,po?@..,Vq,?@2. +,Nɰ,?4@7.{,6j_,?0@;.q,-,?@?.c,'dc, ?@C.» ,j,wx?@G.",u+c?@K. ,(,i?@O3@.Iw,,?h@S3@.U>,t,?@W3@.t,,j?@[3@.B,+?@_3@.,g+j?@c3@.h,>+*?@g3@.~,[,b?@kL.,CB,g?4@of`.,+t?0@sf`.,v,c?@wf`.˪*+,?@{f`.EL+Aj+`?@f`.++-?@. +j+?4@.y+IW+?4@.p++-8?@.c+a^+>q?@.+Zd+h?@.+P+슷?@.:+ک+J?@@.;+R+?4@.i+߿+Jx?4@.tV+#+ ?@.լ+Te+?@.++?@.+C+?@.R++4y?4@.m+Cr+r?0@.wN++?@.r+U:+z?@.+E+ѿ\?@.gR++?@.p5+ib+ɒ?@3@.;++L?h@3@.++3?@3@.~++ڪ?@3@.~+c+\?@3@.h^++?@L.++֭?4@f`.s+H+SN?0@f`. ++c?@f`.R+#+?@f`.Iz+h=+?@f`-i6+-+o?@-ǰ+*ܼ?4@.*$+KZ+]?4@.++ ?@ .Bl+,+h;?@.Ϣ+:+)?@.vT+~)+>?@.+]+$.?h@.u'+P+߾]?@ .+f+?@$.EB+>+?@(.K+:+-?@,.qD++J?@1.=C+TV+}?d@5.+o+ٕ>?@9.zQ+=+d?@=. 
++/A+ؙ?@A.pg+@+!m?@E.P++L1?4@I3@.8++%?4@M3@.V++?@Q3@.9+D+?@U3@.+s{+w?@Y3@.hJ+Y%+?@]L.j<+Xm+D?4@af`.+mV+?0@ef`. +g+Y?@if`.bM+Q|+$?@mf`.i+S&+?@q.+eU:+0?4@u.lw+k+h?4@y.+w +?@}.{+r+k4?@.+o<+]?@.~+ng+?@. +hb+?h@.o+l]+w+?@.{6+e+?@.u+^.+C?@.e=+e4+C|?@.+nG+ ?4@.P+i+?0@.+g+g?@.l+f +?@.r+c@+P?@.B+Z+ZG?@3@.+^+i?h@3@.|+k{+?@3@.F+k-+?@3@.f+h+z$?@3@.+e8f+??@f`.+_m+/?d@f`.3+[+>?@f`.+]:+1?@f`.8+[:+C?@f`. +Z6+o?@ހ. +W+[X+:?4@♠.+Y+?4@晠.P+[`+4?@Ꙡ.y$+bN+c?@.i+f+ϙ?@@. +{+\9+?4@..9+c]++?4@.T+aV+[?@.+[C+?@.+^B+cO?@.\++\+´*?@ .+Y1+?d@.%D+T+?@.-+Wu+?@.x+]j+?@.!+Z#+?@3@.$ +S!+6?h@#3@.q+[B+-p?@'3@.+kd+q?@+3@.Ht+u5+{?@/3@.X+wD+Ԑy?@33@.@+v+0?@7f`.+w+ث?d@;f`.+f+֟?@?f`.j+X{+ƐZ?@Cf`.3i+U(U+r5?@Gf`.wB+[}+u?@K.+`+-!?h@O.+\+c?@S.+Mq+?@W.]+K+?@[.3 +Mx+xt?@_@.~+Qw+?4@c.s+?@.E+=X+?@.+Dr+]?@3@.D+E&+cn?h@3@.+:t+F+=by+[ ?@3@.+?Xw+?@ 3@.+<2+?@ 3@.+;+&?@f`.+:+?d@f`.+8S+]V?@f`.y+1+?@f`.+5_+?@!f`.0 +: +?@%.+=w"+v?4@).h+>+7?4@-.i>+5ql+?@1.T+1 +n?@5.x+9+~?@9.|?+@|+3?@=@.x+?+?4@A.kl+D+:?4@E.n+=+{?@I.e+,U1+d?@M.ga9+)Z+?@Q.xD+/2e+?@V.l=+*b+;?d@Z.+4+ ?@^.+7Z+Kk?@b.+8E+W?@f.r+6t+ ?@j.z+/A[+< ?@n3@.fj+'!+W?h@r3@.t+,]+f?@v3@.h#++1+M?@z3@._T+1+?@~3@.n+9T+?@3@.q_+:n~+5,?@f`.x +<+z?d@f`.n+,}+?@f`.q++y+ ?@f`.j+3A0+F?@f`.+9+t!?@f`.&+:`+&h?@.9+7A;+^(?4@.tbS+1`+?4@.nЬ+-Ջ+?@.f+$}W+MZ?@.W=++v?@.^++Z?@.zG++?+0?@.M+.+{?h@.[+/ ++?@.vv+(+p?@.e$k+!h3+Y;?@.a+ 1h+ ?@.w+(+?@.p%+&+cA?4@.gR+"9+?0@.{++MY+?@.+*r+?@.p+$H+?@.nM+#Z-+2?@.]2+g+&?@.q*+%o+g?4@3@.v+&k+ ?4@3@.v+%+q]?@3@.[++?@3@.U++b+?@3@.O++i?@3@.J+d+g?@ 3@.lN+!+]?@f.o+#>+?h@f.o+++ ?@f.~+'6+*?@f.iS+F+'?@f.gA++?@#f.fϣ+L+]?@'f.h++?@+.`S+N+7?h@/.POr+H+?@3.Q+?+t?@7.g+x+i?@;.i+B+ ?@?.rha+!t+%?@C.l+V+?@G.b+X+?@K@.`B+L+dk?0@O.[4+il+?4@S.a+%+?@W.M+1.+I?@[.O+i+ ?@_._\++?@c.d+e+d?@g.jz+*+y?@k`.h]++V?0@p.i`+?++p?4@t.f#++D?@x.V5+%+ ?@|.Sc++e?@.PM+]+7a?@.H++?@.Y+e+3?@.VW.+jq+?@._+r+Θ?@3@.W+ց+9?h@3@.Jʴ++g?@3@.L++?@3@.S+T+C?@3@.F+ e+{?@3@.IJ+ V+?@3@.P+ w+?@3@.H?+ +?@3@.? 
+ 5+vv?@3@.H+ +r?@f`.F+ o+s?d@f`.E_+ 3+}?@f`.Sv+t+?@f`.T+@+A?@f`.O7L+r+9?@f`.D!+ +g+|B{?@f`.I+ B+?@f`.TU++{?@f`.T(++G?@f`.U~++?@h.; +-+q?@l.5Sa+ =+i<?@p.> +J+t?@t.Eo+ܶ+}ۯ?@x.9Y+ K+na?@|.=({+ 98+sA?@.;Z++q?@.Hg+ +?@.Q!++(?@.N[+;+ ?d@.O++d?@.V+"[+'?@.R-+ +c?@.Jq+/+>?@.IF5++_?@.L+\+?@.L+u+0?@.M+"+?@.H_++$?@.G3+ +o+j?@.L`+ %+Y?@.A{+)+x?@.?+H+v?@.I+ at+{?@.Qj+ ++?@.O+ +_?@.B0E++y?@.+/+r?@.2V*+eX?@.99V*b+n'?@.BaI++yq?@.:Z*+o?@.>m+S+t?@.2ݺ+W+f?@.**'+[N?y@.4L+Xu+g0?y@.>I7++t{?@.Ff++m?@.Di+:+|v?@.?n&+ /+vl?@ .< *+q?@.E++}?@.E+ YO+~i_?@.=+ ݲ+sJ?@..W+0+_r?@ .0_+T+b?@$.6G+f"+k ?@(.+h?@D.3Bg*'+f|?@H.6S*+jI?@L.6>+{+jN?@P@.9+R_+nu?y@T.5D++i++g?@-T* +@v?@@-)*3(+C?@.8*H++.]?@ .*6+:?@L.w*~+C`?@3@.UZ*͙+H?@.n*+H?@.og*!+H+=8?@.(*ʌt+@!M?@ .*:,+@V?@.*+@??@.\*+B?@@.1*n+E^?@.$*N+B?@ .;*+?8?@L.ө*m2+6?@ 3@. *n+5(?@&.h*+;g4?@, .S*_+B8?@1.*{++B,M?@7.bN*M+Is;?@=@. +*;)+I ?@C.Э*'+BR?@If`.vU* +Dn?@OL.*s+F?@UL.P*+B?@[.V*w+:?@a .F*+;<=?@f.:*K++=?@l@.*++@?@r.*+A`V?@x.IB*3+;? ?@~ .j*+7?@L. X*+5!?@3@. *+/}?@.*+/OE?@ . +n*S{+2"?@. *%+5 +?@. *B+5Q?@@.*+6x.?@. l *w+5.?@f`.*[+6E?@L. 4*J-+4?@L. c*+2~?@. *?0+2?@ . +b*+1".?@.7*`+&8j?@-V*-+o?@ܳ@-\n*+n?@-*+!v?@ .*G*z+$ ?@L.W*+%4?@3@.e*+'?@3@.pr*U8+*ͻ?@.`\* +,v?@.*+&q?@ .2*+a+'?@@.P*Xw+-3x?@@. +L*p+1?@. +;*>+0?@#f`. @y*Y+/?@)L.F*[+-:?@/3@.*+,?@5."*wq++G?@; .%*o+.7;?@@. +~*)+1F?@F@.P*+.|V?@L.d*+'?@R.K*4+)VS?@X . *+'2?@^L-*&+"?@d3@.,*+*v?@j3@.6* ++ ?@p."*a+*i]?@u.*+*I?@{.I*7+)S,?@@.*d\+' +?@@.a*~+&?@.I*+( ?@f`.1*+%^?@L.*(A+$*?@3@.7*/0+%eP?@.IN*w+( _?@ .*+'U|?@.*+(I?@@.>-*$+'}?@@.*+'-?@™.K{*+%V?@Ȁ .t*+&?@L.g*2+%E?@L.o*+$V?@3@- :*+#;b?@- *6+#b?@伮.R)*7s@/Cf@'.2)3*k@/Cf@|w6.)*O @/Cf@i.)*@/Cf@w1.Rb)U@*@}@/Cf@?-)*Q@/Cf@q`4P-W)L*`@/Cf@ +˰-z)|*@/Cf@k~-Z)*%@/Cf@B-F)*-@/Cf@f@ -A)O*q[@/Cf@ -^c)Th*@/Cf@&`>- )*oc@/Cf@+ -B)p;* @/Cf@1[_B-)*@/Cf@6ؠ #-e). 
+*GK@/Cf@--t)v*_@/Cf@]F-e)*h@/Cf@bÀ-')*-|@/Cf@h@H-!)+j*@/Cf@m_;-I)V *@/Cf@s;6-&)@8*@/Cf@x?^W-0)G*hp@/Cf@~6 -z)s *@/Cf@-~u)*@/Cf@1e-O)}(*(@/Cf@ !-)|L*@/Cf@,/+-A)|Ap*ß@/Cf@ r~--B)|*@/Cf@'-S)|*f@/Cf@@%N-){*c@/Cf@"h-()y*@/Cf@_-)w!*Oj@/Cf@ q-)ts*"@/Cf@-))r*2@/Cf@L-馏)qY*Q@/Cf@Ŗ -馏)p*Q@/Cf@_'-)o*@/Cf@Б-)lF*C@/Cf@'-)i*m@/Cf@ۍ?_-)j@*F@/Cf@ +j-B)j|*@/Cf@戠 #-:)i,*X@/Cf@@ -j)gnU*@/Cf@9-)eY*1@/Cf@ o-*G)e8?*@/Cf@-Ȝ)e6*@/Cf@OU-)d*@/Cf@z"-K6)cA*@/Cf@ #-s)c*5@/Cf@v-Ṱ)`ω*v@/Cf@?-E)_Q*@/Cf@q-Du)`_*U@/Cf@" o-)a*@/Cf@(m]-T)a+*@/Cf@--)^*@/Cf@3i?^W-d)\Ed*^@/Cf@8y-NN)[*@/Cf@>e :-π)Z*@/Cf@C-ޥ)X*~p@/Cf@I`>-)W**@/Cf@Nޟ˰-5)Wk*ڬ@/Cf@T\!-#)W!*@/Cf@Y` -)Wk*#@/Cf@_X`f-P)V*f@/Cf@d@ -)U B*@/Cf@jT-A)RV *q@/Cf@o-?)O1*m@/Cf@uPH +-)O+*@/Cf@z -Z)P/*@/Cf@L-K|)P(*D@/Cf@*s-Nq)P#l*@/Cf@Gr-޳)Np*@/Cf@Ս-Ս)Og*@@/Cf@D-m)M߀*@/Cf@-;)LW*:@/Cf@@H +-y])KA*y@/Cf@?-)J$*@/Cf@<@W -Q)HV*d@/Cf@` -C)H+*s@/Cf@8`f- )GTN*@/Cf@-)F%*?@/Cf@4-o)D*2@/Cf@Dzh-՘t)C]*~@/Cf@0H-)A@*-@/Cf@Ү- )AH*R@/Cf@-OU-ժx)AE*@/Cf@ݫ?-3)At*@/Cf@)_a-Ց)A*+@/Cf@觟-ԧ)@*J@/Cf@%Ex-Ԍ)?*6@/Cf@9-Ӷ)=w*@/Cf@"-,);*s@/Cf@_-ˌ)9X|*E@/Cf@˰-/)9e+*Q@/Cf@ @-):_*@/Cf@-'L):5*۶@/Cf@_a-ug)9WU*i@/Cf@-{t)8X*@/Cf@덂-͗Q)7 +0* @/Cf@%-k)5k*_@/Cf@*_t-)6(7*0@/Cf@0>-)6Oj*.@/Cf@5-)5e*7@/Cf@; ?_-ɐ)4ȓ*X +@/Cf@@-g)3y*@/Cf@F +*s-˞)2V*Q:@/Cf@K@-.)2?{*&@/Cf@QY-e)3dS*tV@/Cf@V :-͒)3 +e*)@/Cf@\`-ʿ)0)*@/Cf@a o-ǁ).<:*]@/Cf@gH +-Ƈ)-x;*~@/Cf@l~-)..*|@/Cf@qJ-ȆH)-W*U@/Cf@w{_B-Ġ)+y*@/Cf@|~-.v)+r*@/Cf@x@-Dž)**cL@/Cf@Y-))*}@/Cf@u D-9)(P*|s@/Cf@- +)(Y*|6}@/Cf@q-Ĺ)'*{ι@/Cf@ )-5Z)'*|m@@/Cf@n-/M)'$*}0@/Cf@CR-AQ)'QW*}?@/Cf@l-%)&j*|Y@/Cf@J-\)%5*z$@/Cf@hB-`)$N*yn@/Cf@Q-:)$*y@/Cf@eՍ-()$*yͬ@/Cf@-<)$G*y@/Cf@c @-7)#t*xD@/Cf@ o-)"*w6@/Cf@`?-J)!*v" @/Cf@[-)*uG@/Cf@]`H- )$*u@/Cf@-{n)ؿ*u@/Cf@Z-Z)u*t @/Cf@?^W-m7)*s@/Cf@W*-I).7*r@/Cf@ր-)*qj@/Cf@U D-R)#*qJ@/Cf@ ӿ-)*qSL@/Cf@R_t-۰)Ƕ*pu]@/Cf@\1-w)*o@/Cf@O o-Ƹ)*o@/Cf@!_;-w)*mQ@/Cf@'LJ-?)Da*ne@/Cf@,6-,)*n@/Cf@2JJ-Xa)J2*n@/Cf@7L-))*o@/Cf@=G*- +E)*oiO@/Cf@BƠY-G)6*ot@/Cf@HE@%N-);O*m#@/Cf@M9-pr)]*m\@/Cf@SBh-)*l6@/Cf@X +|[-x +)*j@/Cf@^@?-3)*jD@/Cf@c-Z)^*i@/Cf@i= q-p)*i@/Cf@n/+- 
)@.*i@/Cf@t;_B-!) b*i @/Cf@y r~-Q) T*i]@/Cf@8B-Q) э*i]@/Cf@-ٶ) c*hĿ@/Cf@6Y-o) 4*h=H@/Cf@_mY-) I*h)@/Cf@4?-.) +fn*g@/Cf@ @-') z*fw@/Cf@1Tz-8l)1*ef@/Cf@>-)*c@/Cf@/'-@)FN*bN@/Cf@-$)n*b@/Cf@-`H-;)*bۊ@/Cf@@W -.)P*b?@/Cf@+-s)_*c#@/Cf@ƪ*s-y)#*a@/Cf@(8-)*^L5@/Cf@ѧ*-)*[7@/Cf@& -r)V*\@/Cf@ܥm-))I*]T@/Cf@$-)*^@/Cf@磀-R)l*^@/Cf@"伮-(a*]@/Cf@`4P-()*\@/Cf@ _-W(*b*[G@/Cf@`-(3*Zf@/Cf@y-/(x*Y@/Cf@N˰-2(<*Z@/Cf@ -(r*Y:@/Cf@ ͟m-:`( *X@/Cf@ D-(D*Xe@/Cf@L/+-(*W@/Cf@ -G/(*WeZ@/Cf@˟`- +b(H*W@/Cf@ @-](*W-@/Cf@J-l(H*VM@/Cf@ +-͹(B6*R@/Cf@ɠe-#(*MV@/Cf@"?^W-U(*K @/Cf@%H>--(y*N)@/Cf@(?-+(W*NLt@/Cf@* o- (A*PSD@/Cf@-?-$(*R@/Cf@0F-u(*R@/Cf@3_;-|(&*S@/Cf@5 q-(WS*R@/Cf@8c-(*S8@/Cf@;E :-N( *Q@/Cf@>/+-lS(p*MWu@/Cf@@ -(+*JS@/Cf@C`- +(U*K@/Cf@FC@ -HL(5*M)W@/Cf@Iڲ-)(:*M@/Cf@K` -(7t*M@/Cf@NTz-(53*M@/Cf@QA +|[-γ(ƒ*L@/Cf@TL-+(%)*LY@/Cf@V #-(z*L4X@/Cf@Y?-(ك*Kh@/Cf@\?-p (*L@/Cf@^_ -J(Ԝ*K+@/Cf@a1- (֊*Jg-@/Cf@d~˰-(0*H@/Cf@g> -!(ӔA*Gn?@/Cf@iEx-S(*<*E@/Cf@l_mY-(*I*D@/Cf@o} :- Q(*C@/Cf@r(5*D'@/Cf@z{`-H(*E{@/Cf@}:j-4( w*FP@/Cf@-b(C*E}@/Cf@@-(Q*C4@/Cf@yTz-ӝ(̒N*A@/Cf@9 +|[-(f*Am@/Cf@L-(y&*B@/Cf@>--(Ǿ`*Bk@/Cf@x_-(ƒ*A@/Cf@8-J@(*@_ @/Cf@'-SB(Š*@j@/Cf@?-bQ(ħx*?6*@/Cf@v-p(Ë*>i@/Cf@6-}V(d*?X@/Cf@?-#([n*?@/Cf@ q-(M*?@/Cf@uc-](*=@/Cf@5 D-B($K*;6@/Cf@w6-"(*9@/Cf@!-ڤ(*9i@/Cf@t - (wh*:@/Cf@3-2(*<@/Cf@`-|(T*>+@/Cf@ @-((*>v@/Cf@rڲ-(W *>*@/Cf@2` -Q("%*<@/Cf@-(Q*8f_@/Cf@ı o-(*3@/Cf@q +|[-XO(>*1@/Cf@1L-,(*/@/Cf@>--(*1K@/Cf@ϰ- (M*5@/Cf@pH +-n(g*9@/Cf@/-`3(f*;\k@/Cf@Q-(*;@/Cf@گ?-(؝*:k@/Cf@n-uO(ط*8@/Cf@.˰-(&*5+&@/Cf@?-$(*1X@/Cf@c|-;(*.a@/Cf@mEx-֮( **@/Cf@-_mY-s(Z[*)@/Cf@ D-O(--~( *#-:@/Cf@X #-{(*!@[@/Cf@_-yQ(*@/Cf@?-xf( *!@/Cf@-xg(*6@/Cf@W o-yQ(~*@/Cf@'-yl (>!*M@/Cf@_ -x'(*@/Cf@?-uD(k*8}@/Cf@W1-r,(u*@/Cf@ -m$(*$@/Cf@֟˰-kP&(*@/Cf@_;-gH(*@/Cf@V?-`(*Ҍ@/Cf@c|-Vh(z* Su@/Cf@ q-H@y(q|*)D@/Cf@m-:_"(h~k)A@/Cf@U_mY-:c(g9)@/Cf@ @%N-E(n)j@/Cf@ :-Q(u_*F[@/Cf@@-\*(z* @/Cf@T/+-c(~*@/Cf@!-gp(P*_@/Cf@@W -ii(e*bh@/Cf@9-h*(*@/Cf@S-e(}*@/Cf@`-g(~O,*9@/Cf@!Ӏ-jJ('*@/Cf@$@ -j(~*1@/Cf@'S 
@-l(H4*`@/Cf@*"-nM()*d@/Cf@,ҿڲ-nM(*d@/Cf@/J-nT(1%*s@/Cf@2R@-m(}*!@/Cf@5-m(}h* @/Cf@7Tz-ml(|*@/Cf@: o-l({_*f@/Cf@=Q +|[-iG(z*|*p@/Cf@@`4P-j}(za*@/Cf@BL-lb(z*I@/Cf@E8-kD(zOa*@/Cf@HP>--j(y@*/ @/Cf@K-h(x*@/Cf@M_-c(tO*Q@/Cf@PH +-`?b(r=X*@/Cf@SP-c(tf*@/Cf@V o-fܽ(u{r*y@/Cf@XQ-g(u'*@/Cf@[_ -e(s)*@/Cf@^Oy-c(r*0@/Cf@a1-da(r3**@/Cf@c -d(r*iz@/Cf@f-dK(r*[@/Cf@iN_;-c(qC*0@/Cf@l -aԵ(pF*"@/Cf@nc|-`(oB*@/Cf@qEx-`s(oܙ*@/Cf@tMc-`q(o|*@/Cf@w _mY-_(n*`@/Cf@y D-]&(lH* q@/Cf@|@-ZE(kn * 6@/Cf@Lw6-Y(jgB* +@/Cf@ !-Y(jk* k@/Cf@@W -ZZ(jo* '@/Cf@ -X(i* +)@/Cf@K-V(g* @/Cf@ `-Sp(f*Ri@/Cf@ˀ-S:(e*/@/Cf@@ -P(dK*@/Cf@Jj-Ld(b*^@/Cf@ +"-F2(^()@/Cf@ʠ-<\ (XL)@/Cf@` --NP(O)@/Cf@J@-$X-(I9)\c@/Cf@ -(F)?@/Cf@ o-#(H){@/Cf@ +|[-/=(Ou)N@/Cf@I`4P-9 (U)@/Cf@ L-A;(YxF)Vs@/Cf@8-Ef([J)@/Cf@>--Ge(\T)ӵ@/Cf@H-Ie5(]*@/Cf@?-J(]K*I~@/Cf@-JW(]K*L`@/Cf@ o-IB(]\f* @/Cf@G'-J(]su*3@/Cf@_ -Jz(]*@/Cf@y-J߃(]M*ֺ@/Cf@Ɇ-Lt(^~*"@/Cf@F˰-Mu(^*~m@/Cf@-LP(^5&*@/Cf@?-K(]%*>}@/Cf@Ԇc|-J(\Z%*3@/Cf@EEx-IB([J* @/Cf@c-F#(YƬ)4@/Cf@@%N-F:(YW)@/Cf@߅ :-H(Z͊*]%@/Cf@D@-J(\,*3@/Cf@/+-K([*@/Cf@`-I([S* +@/Cf@ -HI{(Z9*/@/Cf@C-F(X)#\@/Cf@`-Ek(Xc)@/Cf@`-Ep(X>)@/Cf@ @-Fܱ(Y +) @/Cf@B"-G-(X)@/Cf@-Fʭ(Xz)t@/Cf@` -D9&(X)*o@/Cf@-C (Vą)q@/Cf@ATz-D0%(V)@/Cf@e-Ek(WE)@/Cf@`4P-F((W)W>@/Cf@ L-F(WT)/@/Cf@@8-F*(W)b@/Cf@ #-F(X)]@/Cf@_-E(WJB)@/Cf@H +-E(W.)@@/Cf@?-D(Vg)@/Cf@Q-Ck(U&)@/Cf@?-B3(T)Y@/Cf@!1-BM(TK)D@/Cf@$> -C i(U)4@/Cf@&-B(T-)F5@/Cf@)?-BX(T)@q@/Cf@,~c|-B(T,p)v@/Cf@/=m-@p(S}2) +@/Cf@1_mY-?(S )@/Cf@4 D-?U&(R)@/Cf@7|@-?1(RL)- @/Cf@:<!-?@(R)H@/Cf@<@W -A(R)(U@/Cf@?-A.(Rĺ)E(@/Cf@B{-@:(R:`) @/Cf@E;`->X(Q*I)@/Cf@G @-@/Cf@RTz-<#(O)@/Cf@Ue-=w(PHP)@/Cf@Xy`4P-<à(O0)=@/Cf@[8B-:*(NC)^@/Cf@]>--:*(NC)^@/Cf@`_-;g(O)8@/Cf@cxH +-;(N)c@/Cf@f7 o-:q&(M­)O@/Cf@hQ-:-(M})N@/Cf@ky-:;(Me)`#@/Cf@nv-:(M),A@/Cf@q6-9~(M3)o@/Cf@s?-8(LV[)M@/Cf@v q-8P7(L(=)@/Cf@yuc-7T(K8)@/Cf@|5@%N-5(K]S)G@/Cf@~@-53(J)u@/Cf@/+-5[(J)#W@/Cf@t@W -5 (I)W@/Cf@3-4Q(I&):@/Cf@-3U(Hde)i@/Cf@@ -10A(GO)!@/Cf@r"-.P^(Et)1@/Cf@2J-*[(C/)@/Cf@-%(@)6@/Cf@Tz-m](;)Ɂ@/Cf@q +|[-(5)@/Cf@1L-w(.8)@/Cf@>--(*&)@/Cf@_- 
+˚(/)s@/Cf@p-(6)?@/Cf@/ o-5(;\)9@/Cf@_ -!"(=4)@@/Cf@1-%w(@ /))@/Cf@n˰-*h(Bo)@/Cf@.?-,(C@)LB@/Cf@ q--3(D$ +)ݲ@/Cf@c--k"(Dw )@/Cf@m D-/(Ekx)@/Cf@,w6-0&(Edz)y@/Cf@`-/6(E)E@/Cf@ë-.p(D)H@/Cf@k--,(C@)3k@/Cf@+ @-.(D;)q@/Cf@ڲ-/(Dr)@/Cf@Ϊ` -/(E).@/Cf@i-/H(D) @/Cf@) +|[-.G(C)=@/Cf@L-.(C1)޾X@/Cf@٨>--.(Cy)^@/Cf@h_--U(CB)@/Cf@'--/(C!)Jz@/Cf@Q-,(Bm)AN@/Cf@y-+۪(B$$)s@/Cf@f˰-+ Y(AK)>@/Cf@&?-+h(A)gt@/Cf@Ex-+C(A)ۨ@/Cf@_mY-+(A])@/Cf@e :-*U(@ܶ)<@/Cf@$!-)E(@P)ـX@/Cf@ -*7(@) @/Cf@`-)I(?)K@/Cf@c@ -)((?)@/Cf@"ڲ-)C(@)t@/Cf@@-)9(?)ؚ@/Cf@Tz-'(>ӗ)@/Cf@a`4P-&*(@ /)f@/Cf@ 8-&[(=,)o@/Cf@ -&(>)լ\@/Cf@-&g(> J)@/Cf@_Q-%W(=&)Ӣ@/Cf@y-& d(=))ԤW@/Cf@ޟ˰-&&?(=)ԫ@/Cf@ -&(=)ԑ@/Cf@]m-%V(={)Ӣ@/Cf@! D-$(--j(.3)O@/Cf@@H +- (+)@/Cf@Q-Y(-J) @/Cf@ -(/)E@/Cf@~ -(0m)E@/Cf@=_mY-((0n)@/Cf@w6-k(0)@/Cf@-(07)@/Cf@{`-(0>)E@/Cf@:-(/0)!@/Cf@-U(/)+@/Cf@ȹ?^W-(. +l)B@/Cf@x #-R(,.),@@/Cf@7-(-)'@/Cf@y-(,)d@/Cf@Ӷ-U(-s)w@/Cf@uEx-i&(-)؃@/Cf@5 :-(-5)<@/Cf@@W -f(-4)&@/Cf@޳-(-9)@/Cf@rڲ-k(-)@/Cf@1-w(,)@/Cf@?^W-r(,U)@/Cf@鰀- K(+t)@/Cf@o o- +v()*)<@/Cf@/1- ɚ(*)@/Cf@?-&p(,9)@/Cf@c-fc(+h)E@/Cf@lw6-(+)@/Cf@,- D(+||)@/Cf@ @- d(*z;)s@/Cf@` - f())n,@/Cf@ie- j())s@/Cf@(>-- 4W(*p)v1@/Cf@- ^(*#)c@/Cf@ +?- tJ(*.) 
@/Cf@ f_;- R(*K)=@/Cf@%m- }~()M)&@/Cf@w6- W(()[@/Cf@9- dF(+Gs)k@/Cf@c @- f()w)@/Cf@"@- t()bM)@/Cf@ +|[- +[(([o)@/Cf@ #- ('g)v@/Cf@#_ o- (')dz@/Cf@&- >b(')@/Cf@( -1(&)@/Cf@+@%N-e(&D)@/Cf@.\`-(%c)՞@/Cf@1-,(%h)@/Cf@3ڠ-X($4)@/Cf@6 o-($ݰ)U@/Cf@9X8-($k)j@/Cf@<-E(#̭)@/Cf@>y-B(#1C)J@/Cf@A -(")@/Cf@DU@%N-(">)]@/Cf@G`-(!1)@/Cf@IӀ-&(Q)@/Cf@LJ,(r)@/Cf@OQe,A()~>@/Cf@R>-,Z()DT@/Cf@T o,2(G)|@/Cf@W,()k@/Cf@ZM q,( +t)v@/Cf@] :,Š( sy)|s@/Cf@_,()d@/Cf@bj,Ҷ(f)*@/Cf@eJ,(")@/Cf@h L,7()@/Cf@jH +, (O)c@/Cf@m?,HR(~)u@/Cf@pF?,()$@/Cf@s@%N,K(8)@@/Cf@u@W ,X(ރ)l@/Cf@x@ ,X()@/Cf@{B@-f()@/Cf@~?^W-(-_)Z@/Cf@?- (O )H@/Cf@?-*(()9@/Cf@>?-xp(t)q3@/Cf@ D,ڍ(2)@/Cf@ ,{E(L_)A@/Cf@{ @,x()@/Cf@9,g()W@/Cf@B,e( Y)k@/Cf@,z(F)4%@/Cf@v,(v)@/Cf@5 q,Y(x<){@/Cf@w6,/() +@/Cf@,2()@/Cf@r,'(W)a6@/Cf@1 +|[,T(r)!@/Cf@,()@/Cf@_ ,(d)@/Cf@n?,oq(X)GS@/Cf@- D,D()9@/Cf@,(0)o@/Cf@",(h),@/Cf@i o,;( z)O@/Cf@( #,(&)@/Cf@Q,(q)@/Cf@¦_;,<.(9)P@/Cf@e@%N,(tW)@/Cf@$ ,(.) N@/Cf@",<(Y/)+@/Cf@͡ o,Hz(+)@/Cf@` #,,(6)Z@/Cf@_ ,+()m@/Cf@?,-(x)@/Cf@؝ D,ٟT( )G@@/Cf@[,!w(|)k@/Cf@,($)P@/Cf@ـ +|[,S>(z)S@/Cf@?,7f()@/Cf@Wy,P( [)\@/Cf@ q,Z(k)s@/Cf@ԟ/+,M@();@/Cf@`,Y(^i))/@/Cf@R,()}@/Cf@8,(<)@/Cf@Ϡ',()@/Cf@_;,()A@/Cf@M D,(>!)@/Cf@ ,(>!)@/Cf@ʠ,(l)@/Cf@`4P,O(:)=i@/Cf@H,()H@/Cf@ + ,?(Sr)G@/Cf@ c,|()ƶ@/Cf@ ,M()e@/Cf@B",&()@/Cf@ +|[,y(5)l5@/Cf@?,V(5)@/Cf@~,J<(.)N;@/Cf@=m,i()@/Cf@@W ,(4)Oo@/Cf@"",(6) @/Cf@%y +|[,( |)ۑ@/Cf@(8H +,@( ê)@/Cf@*,T( ,) $@/Cf@-c,( N)U@/Cf@0t ,榲( )@/Cf@32ڲ,M( )e@/Cf@5?^W,( )@/Cf@8,4( _)@/Cf@;n,Ϟ( )pe@/Cf@>- D,5( )g,@/Cf@@,( E)@/Cf@C@,O( &)@/Cf@Fh8,( +)BU@/Cf@I'_ ,n( +#)@/Cf@Kc|,( +7)_4@/Cf@N!,N( +<<)@/Cf@Qc @,˴( +)^@/Cf@T!e,U( r)6@/Cf@VH +,U( -)#@/Cf@Y ,(W)@/Cf@\]@%N,()@/Cf@_,@(,)(@/Cf@a@,zS(3)b@/Cf@d>-,ݚ ():@/Cf@gW?,((<)@/Cf@jEx,ިB()*@/Cf@l@W ,N0(S)F@/Cf@oڲ,ݩ[()@/Cf@rQL,ݯ()@/Cf@u',(:)rb@/Cf@w ,(Tl)`@/Cf@z!,](()@/Cf@}Jj,L()@/Cf@ `4P,W()@/Cf@,O\()p;@/Cf@?,ِ(u)@/Cf@Dw6,s()%@/Cf@ @,w(؅)@/Cf@ +|[,ּ()n@/Cf@,(s)9@/Cf@>_;,Խ!()'@/Cf@w6,()u@/Cf@ @,()@/Cf@y +|[,5Q(d),]@/Cf@7,r(f)@/Cf@ ,ъ`')@/Cf@!,')A+@/Cf@r",9'9)l@/Cf@1?^W,9')/?@/Cf@Q, 
'3)@/Cf@ q,R' )>I@/Cf@l ,1'8)օ@/Cf@*J,@'x)@/Cf@>-,ѯN'ش)2@/Cf@y,ј';)$X@/Cf@e_mY,q')O@/Cf@#`,$'P)@/Cf@Tz,'))@/Cf@ĠH +, +';)%@/Cf@^_;,N9'ye)P@/Cf@w6,'N )@/Cf@",z'[x),w@/Cf@ϙL,϶'l)@/Cf@W_ ,>'x)@/Cf@m,I'A)@/Cf@,a')u@/Cf@ڑ,='AW)ˬ@/Cf@P?,7')a@/Cf@,͕'Ѽ).@/Cf@̟/+,ۄ'h.)@/Cf@势ڲ,') p@/Cf@HB,̗')@/Cf@y,' )ô@/Cf@@%N,,w')L@/Cf@,'D)%-@/Cf@Ae, + +'-)N@/Cf@ o,ɳ'М)@/Cf@ q,\4'a)@/Cf@|,')F@/Cf@:,DŽ9':_)a@/Cf@?,Ƈ't)~@/Cf@?,ł')|g@/Cf@t`,'T)}q@/Cf@ 2J,Ũ'8)}@/Cf@ ,'o)z@/Cf@˰,9Y' )y@/Cf@lw6,ç<'D)zo@/Cf@*ڲ,V+' )z@/Cf@>-,L')yL@/Cf@,s')yJ@/Cf@d@,'0 )w@/Cf@"",'8)v@/Cf@!8,p'/4)v +@/Cf@$,#8')w7R@/Cf@'\@,!')w@/Cf@*",e'dJ)x@/Cf@,8,"' )x}@/Cf@/,'0:)w'.@/Cf@2T@,('I)w>=@/{Cf@5ڲ,',!)v@/|Cf@7>-,'!)v@/Cf@: ,U'W)v0t@/{Cf@=L/+,$'-,'R )l}@/pCf@,'|'T)lW@/pCf@@W ,*'ץ)l=@/pCf@Q,@'|)k1@/lCf@','׋)k@/lCf@_mY,/'o')kJ@/pCf@ @,';)k@/lCf@H>-,<'֎)j@/kCf@,]7')im@/lCf@ ,'`)i @/hCf@ o,$' +)hW8@/kCf@?Q,.'Н)g@/lCf@ D,:H'ԉ)g@/hCf@ڲ, ?'1 )g @/gCf@x_,2_' +)f@/hCf@6c|,%' +)g@/hCf@`,'x)gO4@/gCf@?^W,'a)f9Y@/dCf@n ,')e%@/dCf@,`,'ѫj)d@/gCf@,r'Ѷ{)ds@/cCf@Q,?'n)d@/dCf@e D,'џl)d*@/dCf@",J.'9 )d5@/cCf@?,'ЛS)c#@/cCf@ÝEx,.t'd)b@/`Cf@[@ ,F'ώ)a^@/`Cf@>-,'6a)`@/_Cf@?,T')`lb@/cCf@Γ,c'Π +)`@/_Cf@Q?^W,8'kw)`@/\Cf@ ,t3'Ε)`@/`Cf@@W ,q'@)`G@/_Cf@ى o,>(')`O@/_Cf@G?,*W'1)`62@/[Cf@/+,/'-B)^@/[Cf@, '&e)]K@/[Cf@Q,b'˦0)\d@/\Cf@= :,a'ˌ])\<@/\Cf@` ,'r)]@/XCf@ o,b')\d@/XCf@u D,'zn)[@?tCf@`,'M )^L5@Da@1L, ')]`@Da@1,(l')[@Da@,w6,B'o`)Y@Da@,U'!)YE@Da@(,' )X%@Da@?,Gp'=)X\@Da@$ ,'w)Y)@Da@,o'~v)X>@Da@$ ,'|)W@Da@) q,ی'{)V۔@Da@/,,'yA")TT@Da@4 o,'xl)TE@Da@:',xx'y`)V\@Da@?m,'x1)Un@Da@E,x'vĸ)S +@Da@J +|[,!}'u#)S^@Da@PQ,i't)R>@Da@Uc,k'sg)Q-@Da@[ ,['qS)Oѐ@Da@` +|[,'qm)Pr@Da@fQ,_!'qk)Q@Da@kc,;'o)O@K@Da@q`,G'n+)Np@Da@ve,'n3)N@Da@{ o,*'mb)N5@Da@}Ex,A'k)M @Da@,'j١)Lx@Da@y,)'jB])K@Da@,$'h)Jl]@Da@v ,u'gC)I@Da@@W ,'f)H@Da@rJ,'e3)Hw@Da@ #,:'c͉)G@Da@n ,V'b)F|@Da@@,'a)F>t@Da@k @,']%)E<@Da@?^W,2'^)()E_@Da@gQ,ME'^)()D9@Da@m,J'\(V)A@Da@c, 'ZD)?@Da@,'Y)@!@Da@`_,'Yp)Am@Da@ޟ˰,x'W)?@Da@\@,b'VL)=)@Da@ @,f'T))(ż@Da@͟m,Q'H)1 @Da@L,D'I)4@Da@ ` 
,]'Iގ)6n@Da@&H>-,'IJ)6I@Da@+y,7'I)6@Da@1Ec,Dj'Ho)7a@Da@6,U'G)7w@Da@-,.'E5)5@Da@G?y,U'Ej)60I@Da@Lm,x'E&K)6]@@Da@R<, 'Dw )5@Da@WJ,'Cl)4@Da@]8B,&.'B8)4@Da@bQ,0'A)4@Da@h6c|,)'A<)3g@Da@m`,&'@)38@Da@s2j,Ѷ'@-w)2@Da@x +|[,''?)1@Da@~0, 'A)0@Da@,'=])0@Da@- :,'=c)0@Da@`,';A)/LQ@Da@*,y ';m).@Da@>-,/';]9)/ @Da@'?,u';^).@Da@ q,':Q)-J@Da@$!,j'9')-T}@Da@ @,p'9Z),@Da@!e,'8n),5j@Da@?,'7)+@Da@,'6)+ @Da@ŝc,Oa'6\)* @Da@@W ,cc'5))t@Da@К",#'4j)(@Da@ +|[,_'4K)'v<@Da@ۘH +,o}'3)&@Da@, '3)'@Da@c,k'2 +7)&@Da@@W ,p'1Up)&@Da@j,'19)&@Da@e,%'1)%O#@Da@_,'/)$~u@Da@y,~P'.)#@Da@ q,}"'.Q)"p@Da@ /+,|'-F)! @Da@`,}'-)"e2@Da@ +,{.e'+%) @Da@8,y9'+)@Da@#',yE'+c)@Da@(_;,yT'*[)@Da@.@%N,xY'))Y@Da@39,v8a'(J_)@Da@9",ut'(n)@Da@>e,vZ''Ŏ)}@Da@D,tii'&g)ll@Da@I?,r'&e9)B@Da@N ,r!'%)@Da@T} :,skh'%)@Da@Y,rm'$)8@Da@_z,r'$m)G@Da@d +|[,rx7'$8!).7@Da@jx_,q'# +)P@Da@o?,ql'#0W)@Da@uv?,qG'"*)k_@Da@z D,p'!ӳ)@Da@s9,nO' )@Da@",m'+)b@Da@qTz,l&')"@Da@>-,l~'`) @Da@o o,kG$'u)@Da@˰,jE')@Da@mm,i;'nr)@Da@!,i0'>|)w)@Da@k,ix,'p)k@Da@J,i<')@Da@i +|[,h<'>)@Da@,gMT'?)@Da@gQ,f.'z!)Q7@Da@,f')A@Da@ec,e_')̡@Da@!,dq')@Da@c,c~'p)@Da@J,bY'/)@Da@a +|[,a&'W)W"@Da@ #,`'j)@Da@_',^j'd3)@Da@ ,_I}';)Z@Da@]Ex,_ ' )@Da@@,^+')@Da@[,^o')[@Da@ j,\'h) 2e@Da@Z,Z'z) %@Da@L,[AC') R@Da@X?,Z'6) %@Da@_ ,Zb'/) *@Da@%V,XkH') +@Da@*՟m,TH '\5)/@Da@0Tw6,R')z<@Da@5,PD')s_@Da@;Rj,P')H@Da@a D,R)'j)@Da@gL`,R87' )a@Da@lˀ,QX' )j@Da@rJڲ,Q{' )R@Da@w,PU' )@@Da@}IL,O' FG)@Da@_,Ov ' )o@Da@G',O ' ʯ)B@Da@,M' )@Da@F ,L[' +/)@Da@_mY,L' +GM)'a@Da@D/+,L' +<<)@Da@,K' +_H)O@Da@C @,K' )_@Da@` ,J' )@Da@Ae,IL' ))@Da@8,ID'')@Da@@H +,I +'5)@Da@ĿQ,Hp6')G@Da@> ,G T'!(b@Da@Ͼ ,F'Ӵ(@Da@=_mY,Fw'}( @Da@ڼ/+,E'A(]@Da@<,EU'ĉ(J@Da@@ ,Dx3'Z({#@Da@:,C'PR(T@Da@,Cfb'k(@Da@9?^W,CY'֓( @Da@ #,B7'#(z@Da@8,A'Q(1@Da@_ ,BP'H(@Da@ 6 ,A|''(?W@Da@c|,A:'7(i@Da@5_mY,?'a({@Da@w6,?c'>}(Q@Da@"4 ,?c' (Q@Da@',>'(@Da@-2",='( @Da@2` ,<@':(p@Da@81 o,<<'qR(M@Da@=L,< '2(Y@Da@C0,<'F(@Da@H,<&(@Da@N/_ ,:&4l(ռ@Da@S ,9ۡ&٠(@Da@Y. 
,9&(@Da@^m,9y&>(H@Da@d- :,8_&m(e@Da@i!,8A&_(h@Da@o+,7-&)'(@Da@t`,6؝&t`( @Da@z*",6h&(@Da@@,5k&I(Ӽ@Da@) o,5*&w>(@Da@?^W,4^&t(=@Da@( #,4l&p($@Da@H +,3&L(@Da@'',3^&(@Da@y,2%&(@Da@&˰,2y&ϕ(s@Da@c|,1&J(w@Da@%c,1Y&#(c@Da@ :,1oN&t(@Da@$!,0&!](>*@Da@9,/e&Kb()w@Da@#`,/[&n(@Da@̣ @,.5&4m(@Da@",.-=&HO(:@Da@ע,-&(ރ@Da@!e,-&(u@Da@L,,1&(g@Da@ >-,,i&(ܯv@Da@?,+6&ŋ('w@Da@ o,*T&[($@Da@?,)L&)( @Da@,)`&qN(ͤ@Da@_;,(&P(@Da@ c|,(&()@Da@c,(R&o (@Da@ D,(]&U9(:@Da@/+,'&(֛@Da@@W ,'!&ޯ(@Da@$,(N&ߧ(o'@Da@*`,'&(@Da@/",&Z&h(դJ@Da@5J,&45&۬(Խ@Da@:,%S&4n(ӝz@Da@@e,$|&٫(ҋ@Da@E?^W,#?&F(A@Da@K8,"y&"(%@Da@P,"E&W(@Da@V,"^l&Յd(@Da@[',"&))(F@Da@a?,"Z&m(a@Da@f,!O&[(@Da@l,! &҂Q(#@Da@q , Z&E (@Da@wEx,&o( @Da@|_mY,V&?(4@Da@ :,&(̧@Da@/+,&+(H@Da@@W ,L&ͮc(@Da@,~&E(?Y@Da@,I&˭(ʛ@Da@ @,]&a(m@Da@ڲ,=&ȁ(-@Da@` ,&٣(T[@Da@,0&ǒ(~@Da@ o,&݇(4@Da@`4P,&(:@Da@B,M&-(O@Da@ #,&T(;@Da@ɐ?,p3&R(ŮV@Da@,& ((@Da@ԏ',u&K(&e@Da@?,I(&<(R@Da@ߏ1, &(1@Da@˰,W&(@Da@?,R*&(@Da@c|,&j(@Da@m,|&3(@Da@ _mY,R&R(!@Da@ :,9U&(@Da@ /+,G&*('@Da@ `,`&(4o@Da@ ,[&o(-@Da@,o`&c(F@Da@ `,t&*0(@Da@! 
@,I&<(@Da@' +ڲ,7&:a(@Da@,J,f&(@Da@2 +@,s_&(@Da@7Tz,Ģ&(@Da@= e,& (a@Da@B?^W,&/(@Da@HB,&j(@Da@M>-,Q&X(@Da@S_,w&a(U@Da@XH +,̞&(y@Da@^, KN&(@Da@cQ, +&i(@Da@i?, W&(@Da@n1, `&H(@Da@t˰, &0(g@Da@y_;,e&(E +@Da@ ,,&(|a@Da@ q,1&X(+@Da@c, &H(lJ@Da@@%N, [S&L(@Da@ :, &(Q@Da@w6, &B(UV@Da@!, +&t(@Da@ , & >(l@Da@, "v&e9(E@Da@`, ++E&H(:@Da@`, +&(l@Da@ @, +I&l(m@Da@ڲ, k&uj(8@Da@ƂJ, a&(@Da@@, s&(@Da@с, & +l(@Da@ o, &()@Da@܁ +|[,0& (H@Da@?^W,JI&c(s@Da@B,8&(Ʈ@Da@ #,&O(h@Da@_, &0(@Da@H +,2=&U(@Da@+&<;(s@Da@'+p&v(3@Da@_ +&D(@Da@ y+&](<@Da@~,&H(@Da@˰,&(@Da@~_;,k&(7@Da@# ,2&d(@@Da@)} q,IE&(S@Da@.c,&r(q@Da@4}@%N,KD&|b(k@Da@9 :, &Ll(@Da@?|w6,= &4(@Da@D!,X&(@Da@J|@W ,[&&(P@Da@O,XD&#(@Da@U{,,&<(@Da@Z+H&R(a@Da@`{@ +x&(@Da@ej+&s(@Da@kzڲ+&(@Da@pJ+~&0(?@Da@vz@+=&m8(yO@Da@{+P&(\@Da@y o+Y&6(@Da@ +|[+&_(@Da@yL+&(3@Da@8+&ѧ(@Da@x #+&(#@Da@_+,&P(@Da@xH ++&(g@Da@+/&a((@Da@w'+;3&q(%@Da@_ +ڃ&4r(}@Da@wy+&( @Da@+"&ԅ(*@Da@v˰+z&(:@Da@?+&{(ט@Da@vc|+&Z(tq@Da@Ex+{&(n@Da@uc+#'&+(?u@Da@@%N+C&(( @Da@u :+ɫ&H(@Da@w6+@&(U@Da@t`+$&\(t@Da@ +7&ʹ(8@Da@s+.&(P@Da@`+@&c((@Da@e+:&0(B@Da@yc+48&K(π@Da@9_mY+I&( @Da@ +@%N+ۦ%&)(K@Da@ D+ޞ&(y@Da@xB+&(4d@Da@88+,&b(@Da@>-+[&@(x@Da@!+(&D(8@Da@x`+&(@Da@8@W +ã&Q(?@Da@ H ++ &Q(m@Da@#+%N& +(6@Da@&w+IU&A(M@Da@)7+&(#@Da@++.&M(2 @Da@.`+3&>(@Da@1w?+様&(v@Da@47y+4&9(d@Da@61+ &.(9@Da@9ڲ+֡&(@Da@-+i&߷(9@Da@`4/++Ҏ&(@Da@b`+V6&~ (Ɖ@Da@e@W +&zf(~}J@Da@htH ++&x(z@Da@k3+&y{(|OF@Da@m+~&{(@Da@p`+̗&}h( +@Da@ss`+'&(#@Da@v3?+&_(S @Da@x1+8&_(.@Da@{"+M&+(;@Da@~rڲ+(&C(\C@Da@2J+M&+(;@Da@_;+c&F(z@Da@ +)&P(>.@Da@q+"&#(g@Da@1 o+48&H_(π@Da@e+[&d +(@Da@_mY+W&Vd(@Da@q@%N+Ӡ&0d(p@Da@0B+&~ԋ(W@Da@8+&~'(@Da@/++cs&~( @Da@p!+ыG&~(@Da@0@W +14&}( @Da@H ++В&|p(|@Da@+N9&~n(P@Da@o`+&|X(@Da@/+n&{Y(@Da@?+C&zh(o@Da@y+]&yy('@Da@n"+&x.(+@Da@.+&x)(+@Da@J+Vh&w5("@Da@?+=b&w<(n@Da@nc|+9&vfF(@Da@- o+J&uj(T@Da@e+&t!(~@Da@ȭ_mY+Ǹw&s;(p@Da@m D+2&t$( @Da@,8+S&sՑ(#O@Da@>-++&sQ(~@Da@Ӭ!+t5&rP(~b@Da@l@W +"&rH(}@Da@,+M&p(|@Da@+:&p ({,@Da@ޫ`+£&p>(y#@Da@k`+-S&m(u@Da@+y+`Y&k;(rf@Da@"+v&i(n:@Da@骠+&g(jc@Da@j_;+&d3](d@Da@* +a&` (]N@Da@Tz+&[Y(U7@Da@e+ 
*&YK(Q@Da@i_mY+ԉ&\(U@Da@) D+&_f(]W@Da@8+7D&c"Z(d@Da@/++:&gK(i@@Da@h`+&f(lb@Da@(H ++V!&g;(n@Da@+&g(ow$@Da@ +`+v&g(o[y@Da@ g`+s&f(oDj@Da@'1+&gr(o`@Da@ڲ+G&g6p(o@Da@J+&f/(o?@Da@f?+ +&e(m_@Da@&c|+[&d(l/@Da@ o+9&d(k@Da@ _mY+&g(m@Da@#e D+9&dfG(mf@Da@&$8+$&c(kM@Da@(!+g&c\(k;e@Da@+@W +&c(kW@Da@.d+D&c(k@Da@1#`+&b[ (k(@Da@3`+&bW(k-@Da@6y+0&aCy(i@Da@9bڲ+ƙ&ai(i@Da@<"J+)&a؆(ig@Da@> +&a(hu2@Da@ATz+&_(hg\@Da@Dac+ r&_(g@Da@G!@%N+_&_](gIp@Da@I8+Z&^-(f@Da@L/++&^+(f"J@Da@O`@W +bN&]K(e@Da@R +&]@\(d@Da@Tߟ`+IH&\pH(d4@Da@W?+&\[(c\@Da@Z_1+&\3(c2@Da@]+_&[.(c @Da@_?+ l&Z(aW@Da@bc|+&Z(_ @Da@e]e+nI&Y8(]@Da@h@%N+1<&Xa\(\g@Da@j8+Q&W/([J@Da@m/++&&W([ݡ@Da@p\@W +&W([=@Da@s+3&V(Y@Da@uۀ+ +&Sx(T@Da@xy+S&P(O3@Da@{Zڲ+S9&RP(RV @Da@~_;+L&T_(V.@Da@+du&TН(W@Da@e+&TJR(V@Da@Y@%N+н&Sc(V;@Da@8+&S(V@Da@!+&S(V@Da@H ++&Sh(Vl@Da@W+$&SXM(U@Da@`+&Rҿ(U7@Da@1+~&S$(UE@Da@+&S^(UX3@Da@V?+(&R2(Tm@Da@ o+e=&Q@(Rm@Da@_mY+Y&P(PS@Da@B+&O(O<@Da@T/++C&PS(P@Da@H ++&P(P@Da@ӿ+&Npr(Me@Da@`+&M(KF@Da@R"+Q/&Kb(H@Da@J+&Is2(CO@Da@+&F(>@Da@e+&Cx(8e8@Da@Q D+6&?(1y@Da@>-+ &>u(0@Da@@W +,&B|(6|a@Da@ŏ+}&E(<;@Da@O`+9&H=J(@}@Da@1+Ӭ&Hƈ(CL@Da@΀J+&J(F{@Da@Ўc|+F`&J(H)@Da@Me+T&K +(H@Da@ D+&Ky(IF@Da@̟/++%&LI(Jm@Da@یH ++y&Kv(I*@Da@K+&K(J@Da@ ?+D&KU(I@Da@ʿڲ+F.&J(IO@Da@?+I&K(Hh@Da@I o+o&Jz(Hde@Da@ @%N+&J[(HV@Da@>-+&J[(G @Da@@W +&I>(FR @Da@G+&Iz(F:@Da@?+j&I4(E@Da@ƿڲ+&Hׁ(E&K@Da@?+&Gg(CT@Da@E o+ &F~$(@a@Da@@%N+&Fn(@@Da@ğ/++&G$(A&@Da@H ++U&G; (Av@Da@ +C`+g&H(A̅@Da@ y+vc&F(@@Da@€J+ &FVK(@@Da@+`&F(?4o@Da@Ac+a)&Dk(=@Da@8+V&D(<@Da@`+&D(= @Da@+&EA(=@Da@ ??+ߧ&D>(;@Da@"+&D(;}@Da@% +z&DSC(<$s@Da@(}c++P&D2(<`g@Da@+x8+o&Bv(6@Da@A8@W +j&@}(4@@Da@C`+*1&>(8(0@Da@F1+q;&;(,@Da@Iv_;+ B&7($T@Da@L5 o+oH&1%($g@Da@N D+f &,7(b@Da@Q!+o&1n( @Da@Ts+&7(#@Da@W3?+mv&;q(*ɍ@Da@Y+&&='(.E@Da@\Tz+U&>,(0@Da@_q@%N+&?(1դ@Da@b0/++c&?[(1j@Da@d+r&?UE(1r~@Da@g?+*&?(2"2@Da@jn+&?f>(1֐@Da@m.c|+\o&?(2a@Da@o@%N+ZF&?|(2_@Da@r/++&@]n(2@Da@uk+;&AH(2U@Da@x+?+:&?c(26@Da@zJ+A&?(2?@Da@}Tz+q+&? 
+(14@Da@i D+&>(1N@Da@(`+33&>`(0`@Da@+$&>'(0Jl@Da@1+52&>*(/@@Da@f?+&>}(/e8@Da@%c+L&>r(/@Da@8+n&=(/Z'@Da@H ++J&=z(.t@Da@c`+&=(-Z@Da@"+-+}&5J(!I@Da@ +{3&4( Ĉ@Da@1+y&3(@Da@ +wb&3$(S@Da@I@%N+sF&1¤(@Da@`+n3v&/B(r@Da@ǀ+iW&-(V@Da@+`.&*( @Da@Ee+U&%V(@Da@>-+M^&"(o@Da@+O&#S(6@Da@"+Xh&''H( +}@Da@A+a&)(@Da@B+gw&,(#G@Da@ H ++kl@&-S(@Da@ y+n &/XO(fg@Da@>?+p6&/e(-@Da@@%N+r&/(@Da@@W +rp&/()`@Da@{`+rL&/(0@Da@:_;+rV&0#(Q@Da@_mY+r@^&/`l( +z@Da@`+q8&/9Q(a@Da@"w`+qCr&/g(h@Da@%6_;+p&/?8(@Da@'_mY+pB3&/(@Da@*`+o &.(@Da@-s`+oWK&.(-@Da@02_;+o&.a(d@Da@2_mY+o&.+B(@Da@5`+n &-Z(fg@Da@8o?+lֳ&-u(@Da@;.?+kD&,(@Da@=@%N+l &,(@Da@@H ++kb&,2(@Da@Cky+le&,(K?@Da@F* +l&&,(# @Da@HB+j&,,(&@Da@K+j&,>\(8c@Da@Nf"+j&+(@Da@Q% o+ik&+9'(0@Da@S>-+j U&+(@Da@V`+j&+J(ö@Da@YbJ+j&+9(@@Da@\!_mY+i&*j(-_@Da@^@W +i&*o(@Da@ay+h&*a(@Da@d^ +g&* ](0@Da@gB+g=&)(`'@Da@iۿ+fX&)]:(@Da@l+f%&)4(Ks@Da@oYc+e&((@Da@r`+et&(&(@Da@t?+d'&(((@Da@w +c6&((`@Da@zT8+cc&(7( @Da@}+c_g&'ע(@Da@ҀJ+c,&'d(I@Da@_mY+a&'(c@Da@PH ++a&'3(i@Da@1+a&&r(G@Da@ o+`&&~O(@Da@/++`&&(@Da@K`+`|&&mW(@Da@ + +_c&%L(@Da@8+]b&$( @Da@+\v&$( R7@Da@FJ+\&$k( o@Da@@%N+]~&$&( @Da@+]N&$( @Da@ڲ+[{&#&( @Da@Ac+\&$( e @Da@H ++\=w&$=( #@Da@"+[.&#( F@Da@}e+[r&${( r@Da@<`+Z1&"( @Da@1+X&"j9( +@Da@ o+Y[&"( @Da@x!+Y&"٤( +@Da@7y+WT&!( @Da@Tz+V&!m( `@Da@!+T& j(_@Da@sy+Ub&!%(1@Da@1Tz+V^X&!F( 2$@Da@!+VGI&![( #b@Da@̯y+UV& (Z@Da@m o+T& (<@Da@,`+T& (@Da@1+T}& =(@Da@שe+S& =(@Da@h@W +ST&(@@Da@&"+Rd+&F(@Da@c+Q&x(e@Da@H ++R5U&=(@Da@bڲ+Qp&(-+M&!(h@Da@W?+N&*(@Da@ o+L5&($@Da@@W +Lj&Ae(@Da@"+L&(W@Da@Q_mY+K&(^@Da@ +J`&Ll(@Da@ _;+J&j(@Da@8+J&O(@Da@K`+J^&('@Da@ Tz+I&v(p@Da@@W +IP&Ĩ(@Da@ڲ+H3&(!@Da@E@%N+Ga&'@Da@`+G& s'@Da@! 
+G]&1'w@Da@$/++G&'L@Da@'?1+G]&2'09@Da@)c+F"&'=@Da@,+F &'|@Da@/z?+E&A'"}@Da@28/++Dz&&'@Da@4y+E &U2'R@Da@7c+D&:'p@Da@:s+Dz&R'@Da@=2?+CiD&R' W@Da@?/++CiD&' W@Da@B1+C~)&';@Da@Em_mY+B&'%@Da@H+`+B4&\'@Da@J+A+&E'@Da@M`+@]&'0@Da@Pfڲ+?&'p@Da@S$B+>߲&B'Q@Da@U`+=p&'{@Da@Xe+>n&'i@Da@[`+@2^&y'@Da@^?+@&E'J@Da@`!+>&'٠@Da@cڲ+=&'@Da@fY D+=&' @Da@i`+=J&'}@Da@kՠe+=&E'}@Da@n+=&'@Da@qR +=p&'R@Da@t`+=&'@Da@vΠ+<|&vG'Cc@Da@y>-+;H&1'{>@Da@|K1+;&'q@Da@ @%N+9&q'A@Da@`+:+&'LN@Da@e+:(&}'G@Da@C+9!&'@Da@+8&'`@Da@H ++8S&5'-@Da@~?+8_&f'F@Da@<!+8D&"'@Da@+8]&<'*@Da@>-+7;U&0'c@Da@v"+7 & ?'-+L& 'b +@kDa@._;+x& F'q@kDa@1DH ++,& $'8@lDa@4 o+& Q'M@lDa@6+& g''ˌ]@lDa@9} D+&k'al@gDa@<:ڲ+ &'͔@kDa@>!+!K&'u@lDa@A + }&p'l@gDa@Ds+ &'͖h@gDa@G1_mY+ i&'S@hDa@I1+ &'@Da@N?+%'0!AD@S@%N+%?'&AD@X`+G%K'AD@]g+M1%$'ʠ?AD@bFJ+%.'ɾFAD@g%e+%&+' CAD@l>-+i%h~'5[AD@p+ %7'Ǿ`AD@u1+(% 'jqAD@z?+6%'ƬqAD@_mY+%-M' AD@`!+L%J' +AD@?+C\%1'-CAD@1+8%h?'mAD@?+]%'ïAD@c+ % '*AD@>-+!%X'9AD@+%'[AD@{?+U%3'AD@Z+A%'T4AD@9Tz+B%p'AD@@%N+]%'nAD@!+h^%5!';AD@+%Q'AD@÷?+_%=5'ZAD@Ȗ+I%'AD@vc|+%5 'AD@Uc+B%;'ʓAD@48+l%='$AD@`+ G%°'' AD@+%g'8oAD@?+;%'gAD@겿ڲ+W%?'M=AD@?+%S'AD@q o+%Ħ'FAD@Q@%N+I%' %AD@0>-+ q%' ^AD@@W + ei%V'AD@+ y%'AD@ `+ !%'^AD@1+ %'iAD@+ +%y'AD@n?+ +R%'_AD@ MTz+ +!%'AD@%-c+ "%'}AD@* D+ %R'AD@.>-+A%Ë'@TAD@3`+Gh%Q'oAD@8H ++<%E'b AD@=+B%'AD@Bk+ۄ%g'AD@GK?+i%_'AD@L+1+ %S'AD@Q +ڲ+-+%f*'AD@h/++%d'AD@H/++b%+"'UAD@(!*B&%']AD@!*%ƴ'\4AD@!*%o'^AD@`*%eN' AD@`*%'"AD@`* %'nAD@h`*%R 'AD@H!*i%t'nXAD@(!*%l'%{AD@!*7E%0'۱AD@/+*%8' AD@ȟ/+*|%%'gAD@Ԩ>-*5+%4'JAD@و8*%i|'$AD@hB*<%"'^AD@I D*Ɨ%zj'AD@)@%N*%ُ'AD@ _mY*%',AD@c*#%'N+AD@ɠe*G%P<'LAD@Tz*,%3'AD@*G%_'ۗAD@j?*ث%'8AD@ +JJ*%_'AD@*ڲ*%'nAD@ 1*"%'|.AD@?*鋠%:'xAD@ˀ*B!%u!'I AD@"*Z%'XAD@'*%'{\A +D@,l`*%M'CA +D@1L/+*(%k'A +D@6,B*%L'=(A +D@; @%N*.%'PA +D@?e*6%'A D@Dc|*T%f',A D@I_;*%T'/lA D@Nڲ* %*'MA D@Soy*%'A D@XO*%Y'_A D@]/*[$%'AD@b`*\%7'AD@f>-*\%tE'AD@k@%N*&%yd'"AD@pe*f%%'UAD@u *%D'tAD@zr*(%q<'݊AD@Sy*k@%sh'AD@3`*R%">'AD@H +*QP%A;')AD@/+*%'bvAD@ D*أ%='MAD@ o*rS%'AD@?*Ma%y'nAD@v"*%'AD@W`*6%&' AD@8*w%O'AD@/+*%'AD@@%N*E%'ڒAD@Tz* 
%J'AD@J*K%H^'AD@Ûy*4%`'AD@{*wL%'kAD@\`*Ϸ%'fAD@= D*ςM%0'FAD@ o*F%F'xAD@J*Δ%y'6AD@y*{|%';AD@*%'AAD@ꠟ/+*̞%7F'AD@_mY*%f'AD@a*~%Ы'gAD@B"*M%'IAD@#`*;%'n AD@`*%;'DAD@ D* L%'ZAD@ Tz*C%Ж'+9AD@ڲ*Ǡ%Y 'AD@*TN%m'$;AD@h`*Ɩ%'~0AD@ I@%N*9% +'}`IAD@%* *} %Z'|AD@* +"*Q%XF'|3AD@.*Į%C'{-* %z,%'\7A*D@ *[9%zx'[V A*D@`*v%x'ZSA'D@ +|/+*J%wa'Y_A'D@]Tz*w%v'Y2A*D@??*%v'YA*D@ !*w%w'YXA*D@Tz*%uW'WA*D@"y*%v9'XB^A*D@'!*7%u6'WPA,D@,Tz*=%uz'VbA,D@1?*5%t'VA,D@6h/+*%ur'VA,D@;I*%>%s''U:A,D@@+`*i%sAB'UlA,D@E >-*G%r&'T*A,D@I *|%r'S A,D@Nπ*|%r 'R9A,D@S8*%q'R%A.D@X_;*"9%q|'RTA.D@]s*8%pɤ'P}A.D@bU@%N*%p'PQA.D@g6*q%oמ'N4A.D@lH +*%o'MA0D@pe* %n'L3A.D@u1*r%m'J5A.D@z!*%l_9'HA0D@c|*%kO'GWA0D@*>%k'FA/D@`B*߈%kΚ'Fz A/D@c*%b('8GA2D@y*1e%]ɚ'0A2D@ȟ/+*%\|'.A0D@ *%`1'3[A2D@*&%da'9A2D@m@%N*7,%g'>A2D@N"*y%iB'AA4D@0!*!%j'Cx|A2D@*e%kF'DYA2D@`*<'%j|'D$ +A4D@@%N*%l$2'E>A4D@Ͷ"*'%kL'DzA4D@Ҙ!*6d%kp''DA4D@z *\%j'CA4D@[* +%ka'CA4D@=_mY*܃%j'BaA4D@1*8%izg'B A4D@/+*%h'ARA4D@?*ZI%h '@sA6D@*%f-'>,A6D@e*a%f'=A6D@`*%f'=p}A5D@hB*%e!'<A5D@Jڲ*%d';A6D@ ,`*%c'9A6D@ *A%b('8@SA8D@*/%b'7GKA8D@Ѡe*%bz'6vA5D@ ?*x%a'5\A5D@%B*m%%bZ'6NCA8D@*vڲ*%`׿'6nA8D@/X!*%`'6dA8D@4:?*o%a'7vA:D@9H +*4%a}'7LA:D@=Tz*EQ%ar'7bA8D@Bߟ`*$%a0J'6˳A8D@G_mY*%a'6A:D@L?*%`G'6~9A:D@QB*%`'6zA:D@Vf"*J%`׿'6xA:D@[H/+*@x%` $'6A:D@`*J*5%_'52/A:D@e @W *%^'4iA:D@i *ψ%^n'4^%['0A<D@y*W%[%'0A<D@B*%[4y'0A<D@"*%["'/TA<D@>-*u%[=S'.A>D@ڲ*%Zt'.A=D@/+*%Ye'-A<D@fJ*%YMz'-A>D@H!*x%XŶ',\A=D@*_;*{%X',#mA>D@ `*%X'+1A>D@?*uc%X'*ӲA>D@@W *%X#a'*A>D@Ʋ * %V~'(A=D@˔H +*%U\''A@D@v *%V*''#9A@D@XH +*%Vq'%ļA=D@9*Wa%S'$FA>D@*:%S%c'#XA@D@c|*|3%QJ'!hA@D@*z%Q' j$A?D@*y7%P'A@D@*{3%Q' ĈA@D@c|*|%R?,'!YA@D@h*}HN%R6Q'"ABD@J *}%Q.'! ABD@,H +*}9%Q'"A@D@  *}Ҩ%RM'"rWA@D@H +*|%Q'!ABD@?*}9%R'"ABD@@W *|O%RP'!ABD@_;*|ƚ%R9E'!ABD@#x`*|g|%QB'!ABD@(ZJ*{8%QG?' 
8ABD@-<!*{n%Q' ABD@2*{#%P' ADD@7>-*z%Q' ABD@;ڲ*y%QV'ˀABD@@8*y%%Qj'jADD@E1*x%PC'0ADD@J D*w%s%O',sACD@Ok?*w\7%Oě'OACD@TM_mY*v %N'yADD@Y/*u**%N 'ADD@^e*tk%M^'n ACD@b*s޳%L'ACD@gTz*rZ%L8'=ADD@l*r%M'4qADD@q *r%M'bADD@v|@W *qj%L]'DAFD@{^J*p%K8'AED@@/+*o?%J'IACD@"ڲ*n^%J8'ADD@8*l%I'AFD@y*l%JM'jAFD@@%N*k %IR'oEAFD@*h%H`'PAFD@e*is%I'AFD@o*j%I'd3AHD@R *jc%H1{'oAFD@4@W *i,%GZ';5AFD@J*hH%Ge'zAHD@>-*h=H%HI'AED@"*f%G'AFD@ D*fJ%GG'AHD@ğ`*e%G{'"hAHD@Ɂe*d%EŬ'jAHD@c*d%F'f@AHD@F *b{%E'@AHD@(`*b%Ea'$AHD@ +*b^%E'AHD@8*aS%Ea'5xAHD@y*`J`%Ex'AHD@_mY*_XB%E)>'AHD@`*]E%C' cAHD@uTz*[*%BAx' DoAHD@XH +*Y%A' +AHD@:_;*V%@'CAJD@>-*S%?'_AJD@1*Q"%? 'AHD@ @%N*Q%@ 'EAJD@ß`*SC%A@'5xAJD@Tz*U'%A'k`AJD@@W *V[%B6' |cAJD@!jJ*W%B)' \AHD@&L>-*X];%B=' +yAJD@+/y*X%C' +AKD@0c*X%C' +AJD@4*X%Cޭ' +AJD@9 *XI%Dr@' +l2AJD@>`*WeZ%Dm' wAJD@Cڲ*W:%DӦ' ALD@H} D*W%E' ALD@M_*V(M%C"' AJD@RA o*T!%B'F{AJD@W$H +*Tlm%B'yALD@\J*S%B't1ALD@`8*S%C'AKD@e?*T*"%D' ALD@je*St%E,2'TAJD@o*R%Dz'qAJD@tr?*R+%E'AKD@yT/+*Qg%C'ALD@~71*P%F*'ALD@_mY*P%B''AND@*O%C'AND@?*OL%C-'%AKD@/+*N%CO'rALD@1*N`%Du4'ALD@_mY*M%Dif'JAKD@g*M)W%Da'MAND@J?*L%D'&AND@,/+*L.%D"',ALD@1*K:%C'sALD@_mY*J%B'AND@*KVh%C$'"AND@?*J$%C8'_IAKD@˜/+*I%C'<-*@8%?%&APD@g?*@P%@ +&)APD@$I o*?{W%?&AND@),H +*?*%@&tAND@.*>F%@i<&APD@2 D*>!w%@&^ APD@7ӟ`*=h%@}&APD@< *=%?ې&&APD@A/+*<1%>8t&wAPD@F{y*;%=&APD@K]e*;\%=LV&v8APD@P@H +*:/%>&&APD@U"*:]%? 
+&APD@Z D*:m0%?W&=APD@^`*:M{%?&wAPD@c *9R%>J)&6APD@h/+*8I%<:&CAPD@my*7|%;q%&QAPD@rqe*71%;3*&}eAPD@wTH +*6%%;&APD@|6*5D%;hK&_APD@ D*4%-*1IJ%<a&!APD@?*/%:)&ణAPD@ o*-5\%8NX&ݴAPD@h@W **G%6&APD@Jڲ*&%3v&ձAQD@-_mY*%<%3j&ӀARD@*%j%5I&ӻAPD@_;*'%8 i&֥)AQD@B**+$%9&ARD@*+%:L&ѣAPD@*,T%9U&ܕ,APD@|!*,ʷ%8&,pARD@_y*,%7ɇ&MARD@Ae*-%8&A#APD@$H +*-2{%8*&ݱAAQD@ڲ*,l%8.&UARD@@%N*,%9߿&`APD@˿*,!%;w &>AQD@_;*,?%=f&;2ARD@8*,%> &ARD@s*+%=&۩ARD@Vc|*+|^%= &ۀyAPD@8!*+v%<&yAQD@y**%; &$ARD@e*)%8&'ARD@ @W *)$%8&؁AQD@¿ڲ*(%8"&FAQD@_mY*(%:&ARD@*(%p&&JAQD@"LB*'b%?w6&AQD@'/`*'T%?c&|ARD@, *'%@.4&j"ARD@0>-*'%@&­AQD@5?*&%>G6&՛AQD@:Tz*%%;\|&ARD@?`*%1)%:&r ARD@D1*$~u%:M&ҍcAQD@Iac*$%=& AQD@NDH +*$g%>/&oARD@S&*$D%@7&CARD@X @%N*#I%@u +&xAQD@\*#I%@&xAQD@a_;*"%?n\&АBATD@fB*"$%={&ϊPARD@k*!<%;&&ARD@pv *!z%:D&ΰAQD@uX/+* %9[&|AQD@z;?* k%;&͉~ARD@ o* /%=&ԷARD@`* .%@+&^ATD@1*%Bm&ZAQD@c* %C &gAQD@H +*%A&gxARD@*xm%@Ļ&זARD@m@%N*2%>"&AQD@O*%=5&wAQD@2_;*%;&ATD@B*q%;P&?ARD@*%p&?LAQD@_;*V%?B&HAQD@xB*89%? &ARD@[*%A=&>zARD@> *u%A&IAQD@ /+*@%DoM&AQD@?*%D.^&ZATD@Tz*:%D&؝ARD@ `*)I%F&4ARD@1*Q%F& %AQD@c*K%F.&?AQD@pH +*%G0&cARD@ R*j%E(&@ARD@%5@%N*K%DT&AQD@**J%C&$?APD@iB*%B>&0AQD@n{`*%B&EARD@s^ * %C &b%ARD@x@>-* D%E&AQD@}#?* S%G?v&rVAPD@ o* %IjW&)ARD@`* ^5%Jd&wAQD@"* 5%I/O&vAQD@c* %F)& ARD@* 6%D`&&APD@;!)I%K&/APD@=ڲ)Oa%Lq&2AKD@@Tz)h%Kq&:ALD@Buy)5%IF&J_APD@D`)p%G &APD@GW`)%F&tdAKD@Iȿڲ)%EU&ALD@L:)0t%E&APD@N@%N) %E#&yALD@Q`)%FVK&AKD@S`)#%H&?APD@U8)W%JA&a APD@Xp )e%Kt&,ALD@Z@%N)%L&`AKD@]R!)޳%M&AKD@_ß`) x%M3&ALD@b48)%M&APD@d )%Lc&gALD@g@%N){%Kt&AKD@i!)%J$H&AKD@k`)8%I&ALD@nj8)a/%HN&ALD@p)qn%EO&ALD@sM@%N)^%Dif& 2ALD@u`)1%Bv&AKD@x/`)#%BL&AKD@zڲ)퐜%A& +ALD@})%A&ALD@y)%C&qALD@`)*'%E{&݊ALD@e)d%G.&!AKD@ֿڲ)괁%Jx&6AKD@GTz)릞%L:& AHD@B)D%L8&QALD@*?)T%I&UALD@_mY))%Gh&AHD@ !)9%C[&5CALD@} o)p%A+&!ALD@8)|*%@&&AHD@`)%?&hAHD@y)敕%?&ALD@B`)冥%@&ALD@)q%=[&4rAHD@$)⪊%>&AHD@ o)?%?&ALD@1)%BS.& ALD@x )a%D(w&>AHD@@%N){%F&:AHD@Z`)%Gw&AHD@ˀ)y%F&MsAHD@<)g%E(&BbAHD@ o):;%E &nAHD@8)U%A&#ALD@ )<%?؝&BALD@@%N)O%=C{&FAHD@r`)%;Y&AAHD@)C%:6&?AHD@T)K%9L,&AHD@ 
o)%:L&AHD@68)x%;&v=ACD@ͧTz)J%=&ACD@B)ڰ%@&8AHD@Ҋ )zz%Ck&wXAHD@@%N)J%Da&AHD@l`)&]%E&AHD@݀)ؑ%Cn&PAHD@N)A%Bj& 5ACD@޿`)O%A&pACD@0ڲ)4%>&zAHD@Tz)֠%<<&\AHD@1)3%9C&AHD@ )׺%9xr&ACD@y)%9/&#ACD@f?)կ>%9f&AHD@_mY)%;V&UADD@H`)Ԕ3%>)& ADD@)r1%?w&)AHD@*):%@&ACD@`)ӡ%?B&qACD@ ڲ)g%>AO&ADD@} o)%;&2ADD@8)ĭ%8&ACD@_Tz)t%5& ACD@B)Њ%5.~&wnADD@B)w%7>&rADD@ +y)%:8J&yADD@ $ )j%=4&TADD@@%N)w%>m&ACD@?)}s%=~&'TA@D@w@%N)|Y%= g&ADD@`)E%&pMA8D@S_mY)%2&pL4A8D@?)7%1K&o;A<D@5@%N)m%1:#&nA<D@ )|%0&lA8D@1)M%2&lA4D@)%*l&h^SA8D@a)<%)6&fA8D@`)?n%(&eoA8D@C@%N)%)<&eВA8D@ )%*.&dA4D@$8)%.&eQ%!!&VSA,D@y)v%!&UA,D@=Tz)A%#&SA0D@)%&&SLA0D@_mY)%+/&SޘA,D@)V%1K&SA,D@ ڲ)%5+&TE(A0D@ q)B%5uT&RA,D@ )|%4$&R,A,D@R8)%1&Q}A,D@À)%/&S'A,D@4?)i%,&RrA,D@8) +%)K&QTA(D@)t"%&i&Q8~A,D@?)A%!&P]A,D@ 8)Z% 1&NKA(D@#g)<%!&O +A,D@%?)4%$D&OA,D@(H8)b%)&N A(D@*)t%1:&PlA(D@-* )z %4&MiA'D@/ڲ)=%6<&KA'D@2 _mY)'%6&GA,D@4| )L%3p&E A,D@6ڲ)K2%0 &BA'D@9]_mY)|%.F&@uA$D@;Tz)2%+AE&=lA$D@>>!)˔%'ma&9VA(D@@y)%$?&7nA(D@C o)%"(&9A(D@E`)-% ;&;A'D@HB)(% q&?A$D@Jq)ú% 2&BB5A$D@L )%~&CNA'D@ORڲ)C%!&CA$D@Q@%N)-%##n&EXxA$D@T3Tz)%&6&D޸A'D@V!)x%* &DA$D@YB)}%/&F!A$D@[`)n%46&FCA$D@] )%8&EA D@`f)%;M&DHA$D@b@%N)%< &E! A$D@eG o)%;&CAD@g?)gq%9G&CA$D@j(8)a%8G&C A$D@l_mY)K%6&D#AD@o Tz)*%4)&EUA D@qz`)%0&DͿA D@s8)U%*B&CmAD@v[_mY)"%&]&COAD@xTz).l%#)U&AA D@{<`)KL%"&@`aA D@}8)`%%n&?3TAD@€_mY),%,?&>AD@‚Tz)'%4n&>HA D@„`)7%;&>A D@‡n8)@e%?e&? +AD@‰_mY)n%?&>1AD@ŒO o)@e%>|V&? 
+AD@Ž?)ѷ%<&>}AD@‘0ڲ)<%:&=elAD@“y)/'%9"&=AD@–`)v%70&>~AD@˜)%3&<AD@š!)*%00&<^AD@b8)%,S&;̤AD@Ÿ_mY)@%'Ԯ&9;3AD@¢C o)=%%&8AD@¤ )%' &9|AD@§$)%*`&9:vAD@©B)=2%/&8?AD@¬_mY)&%68 &8yAD@®u o)P%=&8AD@° )S%C&8IAD@³V!)oX%H&8qAD@µ8)#%Es&78+AD@¸7@%N)V%CW&7yAD@º`)%B&8!AD@½)S%?؝&8JAD@¿`)%;&9AD@ڲ)`%5I&7 +AD@iy)%1T&7#AD@_mY)%-&5}rAD@I o) %%.7&5AD@˺ )%0&5; AD@*`)9P%7_F&4=AD@Кڲ)(%?qO&2{AD@ y)D%F&2CAD@{_mY)%J\\&3)AD@ o)qU%I9&2|AD@\)yZ%H&3ΈAD@?)%E&2AD@<)%@Љ&4XAD@8)%:Ը&4(AD@y)%4&2 AD@_mY)5%1ӝ&1YAD@`)m%1&/AD@n) %5Q&0pAD@?)B%%Nbm&/mAD@/1).%W&/A D@y)6%X>&.Z{A D@_mY) +Z%Z&, AD@`)Q%R\&,WrAD@Tz)`%P&,tAD@` )d%Qj&-MA D@?)%SfR&,7A D@@!)%T_&*A D@)0%L=&*{A D@ 8)%K&*A D@ B)#%G&)ݞA D@@%N).?%CW&)0A D@q_mY)%C&)A D@`)f%A&)A D@Q o)%=^ &(AD@Tz)&%<&)']AD@2)|%=j&(LA D@?)8%B&)>=A D@!`) g%H;&'AD@#!)K%Q&(S)bH%L7&%AD@@ß`).%I&%ZAD@C3`)q%K&%qAD@E o)l"%P&$asAD@H o)|%X=&!AD@J o)~%a&"AD@LTz)~$%h &"AD@OcTz)~;%n`&"AD@QTz)%ri&#lAD@TCTz) %t-`&#AD@VTz)~%t9.&"AD@Y#Tz){%s&!/AD@[Tz){%tG& tAD@^Tz)x%t&4AD@`sTz)wv%uE&`xAD@bTz){e;%u +& AD@eSTz)xh%sAB&$AD@g o)z]6%r& ;AD@j3 o)vs%o& AD@l o)x%nc&! 
AD@o`)yO%l4&3AD@q`)wv%i-&AD@s)w%h/&AD@vc)u%e&HAD@x_mY)t%eP&AD@{C@%N)tp%dq&AD@}@%N)u2%f &RAD@À#y)u%hGs&>|AD@Â1)t%i&&<;AD@Å8)s/%jo&AD@Çrڲ)r%o;&\AD@É)s i%w,&$AD@ÌR!)r%&kjAD@Î`)qF%c#&ĨAD@Ñ2?)p@%l&#AD@Ó )m\%[&mAD@Ö)l%&,AD@Ø o)l :%&AD@Ú`)k%&AD@Ýa)jc%&WIAD@ß@%N)h-%&#AD@âAy)gl%&-AD@ä8)h%wX&AD@ç ڲ)eg%&ќAD@é!)g%&TAD@ì?)jq%.,& IAD@îp )hT%&:AD@ðTz)e%*&6AD@óO`)c2%_h&hAD@õ_mY)e%k&$AD@ø/y)dN4% &AD@ú8)a+%&AD@ý)\v%& AD@ÿ~`)Z%Lc& DAD@ )V%& gAD@] o)T%A&OVAD@̀)R,%-&AD@=@%N)Qҝ%W&IZAD@ˬ8)V%& XAD@)Vp%& wAD@Ќ?)[%z& 9]AD@)_-%La&#AD@k`)_0%h& AD@_mY)_%d&,4AD@K1)^%F&nAD@ܺ)[%& GAD@*?)[{%& xAD@Tz)_Nv%~&AD@ )^%%&AAD@yy)`%6&!AD@ڲ)_ %:M&HAD@X`)^%R&AD@)]su%& AD@7`)^%ng&YAD@y)^O%&AD@ڲ)_%Rr&nAD@`)^ +%&AD@Tz)`2%U0&IAD@e)Uz%<&AD(@B)U_&W&\AD( \ No newline at end of file diff --git a/tests/test_data/mock_northup.fits b/tests/test_data/mock_northup.fits new file mode 100644 index 00000000..6bdf274c --- /dev/null +++ b/tests/test_data/mock_northup.fits @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9534a937f2adbdeee326d6ef28c6c1d90221f3dcf34d4e9f07334699994083ce +size 25182720 diff --git a/tests/test_flat_div.py b/tests/test_flat_div.py index 904362f6..48b6c80e 100644 --- a/tests/test_flat_div.py +++ b/tests/test_flat_div.py @@ -77,9 +77,25 @@ def test_flat_div(): print("Error estimated:",err_estimated) assert(err_flatdiv == pytest.approx(err_estimated, abs = 1e-2)) - print(flatdivided_dataset[0].ext_hdr) + # print(flatdivided_dataset[0].ext_hdr) corgidrp.track_individual_errors = old_err_tracking + + ### Check to make sure DQ gets set when flatfield zero. ### + + ## Injected some 0s. + pixel_list = [[110,120],[50,50], [100,100]] + for pixel in pixel_list: + flatfield.data[pixel[0],pixel[1]] = 0. + + ## Perform flat division + flatdivided_dataset_w_zeros = l2a_to_l2b.flat_division(simflat_dataset, flatfield) + + ## Check that all the pixels that were zeroed out have the DQ flag set to 4. 
+ for pixel in pixel_list: + for i in range(len(simdata_filenames)): + assert np.bitwise_and(flatdivided_dataset_w_zeros.all_dq[i,pixel[0],pixel[1]],4) + if __name__ == "__main__": test_flat_div() \ No newline at end of file diff --git a/tests/test_fluxcal.py b/tests/test_fluxcal.py new file mode 100644 index 00000000..4cadb313 --- /dev/null +++ b/tests/test_fluxcal.py @@ -0,0 +1,122 @@ +import pytest +import os +import numpy as np +from corgidrp.mocks import create_default_headers +from corgidrp.data import Image, Dataset +import corgidrp.fluxcal as fluxcal +import corgidrp.l4_to_tda as l4_to_tda +from astropy.modeling.models import BlackBody +import astropy.units as u + +data = np.ones([1024,1024]) * 2 +err = np.ones([1,1024,1024]) * 0.5 +prhd, exthd = create_default_headers() +exthd["CFAMNAME"] = '3C' +image1 = Image(data,pri_hdr = prhd, ext_hdr = exthd, err = err) +image2 = image1.copy() +dataset=Dataset([image1, image2]) +calspec_filepath = os.path.join(os.path.dirname(__file__), "test_data", "bd_75d325_stis_006.fits") + +def test_get_filter_name(): + """ + test that the correct filter curve file is selected + """ + global wave + global transmission + filepath = fluxcal.get_filter_name(dataset) + assert filepath.split("/")[-1] == 'transmission_ID-21_3C_v0.csv' + + wave, transmission = fluxcal.read_filter_curve(filepath) + + assert np.any(wave>=7130) + assert np.any(transmission < 1.) 
+ + #test a wrong filter name + image3 = image1.copy() + image3.ext_hdr["CFAMNAME"] = '5C' + dataset2 = Dataset([image3, image3]) + with pytest.raises(ValueError): + filepath = fluxcal.get_filter_name(dataset2) + pass + + +def test_flux_calc(): + """ + test that the calspec data is read correctly + """ + calspec_flux = fluxcal.read_cal_spec(calspec_filepath, wave) + assert calspec_flux[0] == pytest.approx(2e-13, 1e-15) + + band_flux = fluxcal.calculate_band_flux(transmission, calspec_flux, wave) + eff_lambda = fluxcal.calculate_effective_lambda(transmission, calspec_flux, wave) + assert eff_lambda == pytest.approx((wave[0]+wave[-1])/2., 3) + +def test_colorcor(): + """ + test that the pivot reference wavelengths is close to the center of the bandpass + """ + + lambda_piv = fluxcal.calculate_pivot_lambda(transmission, wave) + assert lambda_piv == pytest.approx((wave[0]+wave[-1])/2., 0.3) + + calspec_flux = fluxcal.read_cal_spec(calspec_filepath, wave) + ## BB of an O5 star + bbscale = 1.e-21 * u.erg/(u.s * u.cm**2 * u.AA * u.steradian) + flux_source = BlackBody(scale = bbscale, temperature=54000.0 * u.K) + K_bb = fluxcal.compute_color_cor(transmission, wave, calspec_flux, lambda_piv, flux_source(wave)) + assert K_bb == pytest.approx(1., 0.01) + + flux_source = BlackBody(scale = bbscale, temperature=100. 
* u.K) + K_bb = fluxcal.compute_color_cor(transmission, wave, calspec_flux, lambda_piv, flux_source(wave)) + assert K_bb > 2 + # sanity check + K = fluxcal.compute_color_cor(transmission, wave, calspec_flux, lambda_piv, calspec_flux) + assert K == 1 + + # test the corresponding pipeline step + output_dataset = l4_to_tda.determine_color_cor(dataset, calspec_filepath, calspec_filepath) + assert output_dataset[0].ext_hdr['LAM_REF'] == lambda_piv + assert output_dataset[0].ext_hdr['COL_COR'] == K + # test it with star names + calspec_name = 'Vega' + source_name = 'TYC 4424-1286-1' + output_dataset = l4_to_tda.determine_color_cor(dataset, calspec_name, source_name) + assert output_dataset[0].ext_hdr['LAM_REF'] == lambda_piv + assert output_dataset[0].ext_hdr['COL_COR'] == pytest.approx(1,1e-2) + +def test_calspec_download(): + """ + test the download of a calspec fits file + """ + filepath = fluxcal.get_calspec_file('Vega') + assert os.path.exists(filepath) + os.remove(filepath) + filepath = fluxcal.get_calspec_file('TYC 4424-1286-1') + assert os.path.exists(filepath) + os.remove(filepath) + + with pytest.raises(ValueError): + filepath = fluxcal.get_calspec_file('Todesstern') + +def test_app_mag(): + """ + test the calculation of the apparent Vega magnitude + """ + # test the corresponding pipeline step + # sanity check + output_dataset = l4_to_tda.determine_app_mag(dataset, 'Vega') + assert output_dataset[0].ext_hdr['APP_MAG'] == 0. 
+ output_dataset = l4_to_tda.determine_app_mag(dataset, calspec_filepath) + assert output_dataset[0].ext_hdr['APP_MAG'] == pytest.approx(9.55, 0.3) + output_dataset = l4_to_tda.determine_app_mag(dataset, calspec_filepath, scale_factor = 0.5) + assert output_dataset[0].ext_hdr['APP_MAG'] == pytest.approx(9.55+-2.5*np.log10(0.5), 0.3) + +if __name__ == '__main__': + test_get_filter_name() + test_flux_calc() + test_colorcor() + test_calspec_download() + test_app_mag() + + + diff --git a/tests/test_kgain_cal.py b/tests/test_kgain_cal.py index 199216d3..4783724b 100644 --- a/tests/test_kgain_cal.py +++ b/tests/test_kgain_cal.py @@ -122,7 +122,7 @@ def count_contiguous_repeats(arr): divide_em=True) image_sim.ext_hdr['DATETIME'] = time_stack_arr0[t+j*exp_repeat_counts[j]] # OBSTYPE has no KGAIN value, but NONLIN - image_sim.ext_hdr['OBSTYPE'] = 'NONLIN' + image_sim.pri_hdr['OBSTYPE'] = 'NONLIN' frame_list.append(image_sim) dataset_kg = Dataset(frame_list) diff --git a/tests/test_mean_combine.py b/tests/test_mean_combine.py index 84dec301..da1932c1 100644 --- a/tests/test_mean_combine.py +++ b/tests/test_mean_combine.py @@ -29,7 +29,7 @@ def test_mean_im(): - """Verify method calculates mean image and erorr term.""" + """Verify method calculates mean image and error term.""" tol = 1e-13 check_combined_im = np.mean(check_ims, axis=0) diff --git a/tests/test_northup.py b/tests/test_northup.py new file mode 100644 index 00000000..e7d98494 --- /dev/null +++ b/tests/test_northup.py @@ -0,0 +1,73 @@ +from corgidrp import data +from corgidrp.l3_to_l4 import northup +from astropy.io import fits +from matplotlib import pyplot as plt +import numpy as np +import os +from glob import glob + +def test_northup(save_derot_dataset=False,save_comp_figure=False): + """ + unit test of the northup function + + Args: + save_derot_dataset (optional): if you want to save the derotated file at the input directory, turn True + save_comp_figure (optional): if you want to save a comparison figure of 
the original mock data and the derotated data + + """ + + # read mock file + dirname = 'test_data' + mockname = 'mock_northup.fits' + + mockfilepath = os.path.join(os.path.dirname(__file__),dirname,mockname) + if not mockfilepath: + raise FileNotFoundError(f"No mock data {mockname} found") + + # running northup function + input_dataset = data.Dataset(glob(mockfilepath)) + derot_dataset = northup(input_dataset) + # save fits file + if save_derot_dataset is True: + derot_dataset.save(dirname,[mockname.split('.fits')[0]+'_derotated.fits']) + + # read the original mock file and derotated file + im_input = input_dataset[0].data + roll_angle = input_dataset[0].pri_hdr['ROLL'] + im_derot = derot_dataset[0].data + dq_input = input_dataset[0].dq + dq_derot = derot_dataset[0].dq + # the location for test, where the mock file has 1 in DQ + x_value1 = input_dataset[0].dq_hdr['X_1VAL'] + y_value1 = input_dataset[0].dq_hdr['Y_1VAL'] + + # check if rotation works properly + assert(im_input[y_value1,x_value1] != im_derot[y_value1,x_value1]) + assert(dq_input[y_value1,x_value1] != dq_derot[y_value1,x_value1]) + + # check if the derotated DQ frame has no non-integer values (except NaN) + non_integer_mask = (~np.isnan(dq_derot)) & (dq_derot % 1 != 0) + non_integer_indices = np.argwhere(non_integer_mask) + assert(len(non_integer_indices) == 0) + + # save comparison figure + if save_comp_figure is True: + + fig, (ax0, ax1) = plt.subplots(nrows=1, ncols=2, sharex=True, figsize=(8,5)) + + ax0.set_title('Original Mock Data') + ax0.imshow(im_input,origin='lower') + + ax1.set_title(f'Derotated Mock Data\n by {-roll_angle}deg counterclockwise') + ax1.imshow(im_derot,origin='lower') + + outdir = os.path.join(os.path.dirname(__file__),dirname) + os.makedirs(outdir, exist_ok=True) + outfilename = 'compare_northup.png' + + plt.savefig(os.path.join(outdir,outfilename)) + + print(f"Comparison figure saved at {dirname+outfilename}") + +if __name__ == "__main__": + test_northup() diff --git 
a/tests/test_ops.py b/tests/test_ops.py index d6bb6c22..67b14375 100644 --- a/tests/test_ops.py +++ b/tests/test_ops.py @@ -81,5 +81,8 @@ def test_ops_produces_expected_file(): for output_file in output_filelist: assert os.path.exists(output_file), f"Expected output file {output_file} does not exist." + ### Clean up + mycaldb.remove_entry(new_nonlinearity) + if __name__ == "__main__":# test_ops_produces_expected_file() \ No newline at end of file diff --git a/tests/test_sort_pupilimg_frames.py b/tests/test_sort_pupilimg_frames.py new file mode 100644 index 00000000..81e7aa36 --- /dev/null +++ b/tests/test_sort_pupilimg_frames.py @@ -0,0 +1,582 @@ +import os +import copy +import pytest +import random +import numpy as np +from pathlib import Path +import astropy.io.fits as fits + +from corgidrp import sorting as sorting +import corgidrp.data as data +from corgidrp.data import Image +from corgidrp.mocks import create_default_headers + +# Functions +def get_cmdgain_exptime_mean_frame( + exptime_sec=None, + nframes=None, + ): + """ + Create an array of CMDGAIN, EXPTIME for frames that will be used to + generate a mean frame. + + Args: + exptime_sec (float): exposure time of the frame in seconds + nframes (int): (minimum) number of frames to generate a mean frame + + Returns: + cmdgain_list (list): list of commanded gains + exptime_list (list): list of exposure frames + """ + if exptime_sec is None: + raise Exception('Missing input exposure times for mean frame data') + if nframes is None: + raise Exception('Missing number of frames for mean frame data') + + cmdgain_list = [1] * nframes + exptime_list = [exptime_sec] * nframes + return cmdgain_list, exptime_list + +def get_cmdgain_exptime_kgain( + exptime_sec=None, + nframes=None, + ): + """ + Create an array of CMDGAIN, EXPTIME for frames that will be used to + calibrate k-gain. 
+ + Args: + exptime_sec (list): set of distinct exposure times n seconds chosen to + collect frames for K-gain calibration + nframes (int): number of frames per ditinct exposure time + + Returns: + cmdgain_list (list): list of commanded gains + exptime_list (list): list of exposure frames + """ + if exptime_sec is None: + raise Exception('Missing input exposure times for K=gain calibration data') + if nframes is None: + raise Exception('Missing input number of frames for K=gain calibration data') + + cmdgain_list = [1] * (len(exptime_sec) * nframes) + exptime_list = [] + for exptime in exptime_sec: + exptime_list += [exptime] * nframes + return cmdgain_list, exptime_list + +def get_cmdgain_exptime_nonlin( + exptime_sec=None, + nonunity_em=None, + change_exptime=False, + ): + """ + Create an array of CMDGAIN, EXPTIME for frames that will be used to + calibrate non-linearity. + + Args: + exptime_sec (list): set of distinct exposure times in seconds chosen to + collect frames for non-linearity calibration for each EM gain + nonunity_em (list): set of ditinct (non-unity) EM gains chosen to collect + data for non-linearity + change_exptime (bool) (optional): if True, it will change the input exposure + times by a small amount without changing the ordering of exptime_sec + + Returns: + cmdgain_list (list): list of commanded gains + exptime_list (list): list of exposure frames + """ + if exptime_sec is None: + raise Exception('Missing input exposure times for non-linearity calibration data') + if nonunity_em is None: + raise Exception('Missing input EM gain for non-linearity calibration data') + + cmdgain_list = [] + exptime_list = [] + fac_change = 0 + if change_exptime: + # Avoid the (unlikely) coincidence of +1/-1 in the uniform distribution + fac_change = min(np.abs(np.diff(exptime_sec))) / 3 + for emgain in nonunity_em: + cmdgain_list += [emgain] * len(exptime_sec) + exptime_sec = (np.array(exptime_sec) * + (1 + fac_change*random.uniform(-1, 1))) + exptime_list += 
exptime_sec.tolist() + return cmdgain_list, exptime_list + +def get_cmdgain_exptime_emgain( + em_emgain=None, + exptime_emgain_sec=None, + nframes=None, + ): + """ + Create an array of CMDGAIN, EXPTIME for frames that will be used to + calibrate EM-gain vs DAC. Notice the pairing between unity and non-unity + gain frames. + + Args: + em_emgain (list): set of ditinct (non-unity) EM gains chosen to collect + data for EM gain with pupil images + exptime_emgain_sec (list): set of distinct exposure times in seconds chosen to + collect frames for non-linearity calibration for each EM gain + nframes (int): number of frames per ditinct exposure time + + Returns: + cmdgain_list (list): list of commanded gains + exptime_list (list): list of exposure frames + """ + if em_emgain is None: + raise Exception('Missing input EM gain for EM-gain calibration data') + if exptime_emgain_sec is None: + raise Exception('Missing input exposure times for EM-gain calibration data') + if nframes is None: + raise Exception('Missing input number of frames for EM-gain calibration data') + + # Create pairs of frames + cmdgain_list = [] + for idx in range(len(em_emgain)): + cmdgain_list += [em_emgain[idx]] * nframes[idx] + exptime_list = [] + for idx in range(len(exptime_emgain_sec)): + exptime_list += [exptime_emgain_sec[idx]] * nframes[idx] + # Unity and non-unity gains + return cmdgain_list, exptime_list + +def make_minimal_image( + cmdgain=1, + exptime_sec=0, + frameid=0, + ): + """ + This function makes a mock frame with minimum memory in its data and error + fields. It is used in this test script only. 
+ + Args: + cmdgain (float): commanded gain of the frame + exptime_sec (float): exposure time of the frame + frameid (int): an integer value used to indentify the frame + + Returns: + filename (String): filename with path of the generated FITS file + """ + signal = np.zeros(1) + + prhd, exthd = create_default_headers() + # Mock error maps + err = np.ones(1) + dq = np.zeros(1, dtype = np.uint16) + # Creating a FITS file to assign it a filename with the frame ID + prim = fits.PrimaryHDU(header = prhd) + hdr_img = fits.ImageHDU(signal, header=exthd) + hdul = fits.HDUList([prim, hdr_img]) + # Record actual commanded EM + hdul[1].header['CMDGAIN'] = cmdgain + # Record actual exposure time + hdul[1].header['EXPTIME'] = exptime_sec + # Add corresponding VISTYPE + hdul[0].header['VISTYPE'] = 'PUPILIMG' + # IIT filename convention. TODO: replace with latest L1 filename version + filename = str(Path('simdata', f'CGI_EXCAM_L1_{frameid:0{10}d}.fits')) + hdul.writeto(filename, overwrite = True) + return filename + +def setup_module(): + global EXPTIME_MEAN_FRAME, NFRAMES_MEAN_FRAME + global EXPTIME_KGAIN, NFRAMES_KGAIN + global EXPTIME_NONLIN, CMDGAIN_NONLIN + global EXPTIME_EMGAIN_SEC, EXPTIME_EMGAIN_SEC + global n_mean_frame_total, n_kgain_total, n_nonlin_wo_change_total, n_nonlin_w_change_total + global dataset_w_change, dataset_wo_change + # Note: the values for the non-unity em gains and the number + # of frames used for the mean frame, K-gain, non-linearity and EM-gain vs DAC + # calibration come from either TVAC or some preliminary version of the + # Commissioning test calculations + + # Global constants + # Mean frame + EXPTIME_MEAN_FRAME = 5 + NFRAMES_MEAN_FRAME = 30 + # Checks + if NFRAMES_MEAN_FRAME < 30: + raise Exception(f'Insufficient frames ({NFRAMES_MEAN_FRAME}) for the mean frame') + + # K-gain + EXPTIME_KGAIN = [0.077, 0.770, 1.538, 2.308, 3.077, 3.846, 4.615, 5.385, 6.154, + 6.923, 7.692, 8.462, 9.231, 10.000, 10.769, 11.538, 12.308, 13.077, 13.846, + 14.615, 
15.385, 1.538] + NFRAMES_KGAIN = 5 + # Checks + if NFRAMES_KGAIN < 5: + raise Exception(f'Insufficient frames ({NFRAMES_KGAIN}) per unique exposure time in k-gain') + if len(EXPTIME_KGAIN) < 22: + raise Exception(f'Insufficient unique exposure times ({len(EXPTIME_KGAIN)}) in k-gain') + if np.all(np.sign(np.diff(EXPTIME_KGAIN[:-1])) == 1) is False: + raise Exception('Exposure times in K-gain must be monotonic but for the last value') + + # Non-linearity + EXPTIME_NONLIN = [0.076, 0.758, 1.515, 2.273, 3.031, 3.789, 4.546, 5.304, 6.062, + 6.820, 7.577, 8.335, 9.093, 9.851, 10.608, 11.366, 12.124, 12.881, 13.639, + 14.397, 15.155, 1.515] + CMDGAIN_NONLIN = [1.65, 5.24, 8.60, 16.70, 27.50, 45.26, 87.50, 144.10, 237.26, + 458.70, 584.40] + # Checks + if len(EXPTIME_NONLIN) < 22: + raise Exception(f'Insufficient frames ({len(EXPTIME_NONLIN)}) per unique EM value in non-linearity') + if len(CMDGAIN_NONLIN) < 11: + raise Exception(f'Insufficient values of distinct EM Values ({len(EXPTIME_NONLIN)}) in non-linearity') + if np.sum(np.array(EXPTIME_NONLIN) == EXPTIME_NONLIN[-1]) != 2: + raise Exception('Last exposure time must be repeated once') + if len(set(EXPTIME_NONLIN)) != len(EXPTIME_NONLIN) - 1: + raise Exception('Only one exposure time can be repeated in non-linearity') + if EXPTIME_NONLIN[-1] in EXPTIME_NONLIN[0:5] is False: + raise Exception('The last exposure time must be present at the beginning of the exposure times in non-linearity') + if np.all(np.sign(np.diff(EXPTIME_NONLIN[:-1])) == 1) is False: + raise Exception('Exposure times in Non-linearity must be monotonic but for the last value') + + # Notice the pairing between unity and non-unity gain frames + EM_EMGAIN=[1.000, 1.000, 1.007, 1.015, 1.024, 1.035, 1.047, 1.060, 1.076, 1.094, + 1.115, 1.138, 1.165, 1.197, 1.234, 1.276, 1.325, 1.385, 1.453, 1.534, 1.633, + 1.749, 1.890, 2.066, 2.278, 2.541, 2.873, 3.308, 3.858, 4.581, 5.577, 6.189, + 6.906, 7.753, 8.757, 9.955, 11.392, 13.222, 15.351, 17.953, 21.157, 
25.128, + 30.082, 36.305, 44.621, 54.768, 67.779, 84.572, 106.378, 134.858, 172.244, + 224.385, 290.538, 378.283, 494.762, 649.232, 853.428] + EXPTIME_EMGAIN_SEC=[5, 10, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 10, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10] + NFRAMES_EMGAIN=[3, 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5] + # Checks + if len(EM_EMGAIN) < 56: + raise Exception(f'Insufficient number of EM gain values ({len(EM_EMGAIN)}) in EM-gain vs DAC') + if len(EXPTIME_EMGAIN_SEC) != len(EM_EMGAIN): + raise Exception(f'Inconsistent number of sets in EM-gain vs DAC') + if len(EXPTIME_EMGAIN_SEC) != len(NFRAMES_EMGAIN): + raise Exception(f'Inconsistent number of sets in EM-gain vs DAC') + + # Test data: Consider two possible scenarios: identical exposure times among the + # different subsets of non-unity gain used to calibrate non-linearity or different + # values of exposure times + + # Values for mean frame + cmdgain_mean_frame, exptime_mean_frame = get_cmdgain_exptime_mean_frame( + exptime_sec=EXPTIME_MEAN_FRAME, + nframes=NFRAMES_MEAN_FRAME, + ) + + if len(cmdgain_mean_frame) != len(exptime_mean_frame): + raise Exception('Inconsistent lengths in the mean frame') + # Total number of frames + n_mean_frame_total = len(cmdgain_mean_frame) + + # Values for K-gain + cmdgain_kgain, exptime_kgain = get_cmdgain_exptime_kgain( + exptime_sec=EXPTIME_KGAIN, + nframes=NFRAMES_KGAIN, + ) + if len(cmdgain_kgain) != len(exptime_kgain): + raise Exception('Inconsistent lengths in k-gain') + # Total number of frames + n_kgain_total = len(cmdgain_kgain) + + # Values for Non-linearity + cmdgain_nonlin_wo_change, exptime_nonlin_wo_change = get_cmdgain_exptime_nonlin( + exptime_sec=EXPTIME_NONLIN, + nonunity_em=CMDGAIN_NONLIN, + change_exptime=False, + ) + if 
len(cmdgain_nonlin_wo_change) != len(exptime_nonlin_wo_change): + raise Exception('Inconsistent lengths in non-linearity') + # Total number of frames + n_nonlin_wo_change_total = len(cmdgain_nonlin_wo_change) + + cmdgain_nonlin_w_change, exptime_nonlin_w_change = get_cmdgain_exptime_nonlin( + exptime_sec=EXPTIME_NONLIN, + nonunity_em=CMDGAIN_NONLIN, + change_exptime=True, + ) + if len(cmdgain_nonlin_w_change) != len(exptime_nonlin_w_change): + raise Exception('Inconsistent lengths in non-linearity') + # Total number of frames + n_nonlin_w_change_total = len(cmdgain_nonlin_w_change) + + # Values for EM-gain vs DAC + cmdgain_emgain, exptime_emgain = get_cmdgain_exptime_emgain( + em_emgain = EM_EMGAIN, + exptime_emgain_sec = EXPTIME_EMGAIN_SEC, + nframes = NFRAMES_EMGAIN, + ) + if len(cmdgain_emgain) != len(exptime_emgain): + raise Exception(f'Inconsistent lengths in em-gain vs dac') + # Total number of frames + n_emgain_total = len(cmdgain_emgain) + + # DRP Dataset + # Create directory for temporary data files (not tracked by git) + if not os.path.exists(Path('simdata')): + os.mkdir(Path('simdata')) + + idx_frame = 0 + filename_list = [] + # Mean frame + print('Generating frames for mean frame') + for i_f in range(n_mean_frame_total): + filename = make_minimal_image( + cmdgain=cmdgain_mean_frame[i_f], + exptime_sec=exptime_mean_frame[i_f], + frameid=idx_frame, + ) + filename_list += [filename] + idx_frame += 1 + # K-gain + print('Generating frames for k-gain') + for i_f in range(n_kgain_total): + filename = make_minimal_image( + cmdgain=cmdgain_kgain[i_f], + exptime_sec=exptime_kgain[i_f], + frameid=idx_frame, + ) + filename_list += [filename] + idx_frame += 1 + # EM-gain + print('Generating frames for em-gain') + for i_f in range( n_emgain_total): + filename = make_minimal_image( + cmdgain=cmdgain_emgain[i_f], + exptime_sec=exptime_emgain[i_f], + frameid=idx_frame, + ) + filename_list += [filename] + idx_frame += 1 + # Non-linearity (two cases) + print('Generating 
frames for non-linearity') + filename_wo_change_list = copy.deepcopy(filename_list) + for i_f in range(n_nonlin_wo_change_total): + filename = make_minimal_image( + cmdgain=cmdgain_nonlin_wo_change[i_f], + exptime_sec=exptime_nonlin_wo_change[i_f], + frameid=idx_frame, + ) + filename_wo_change_list += [filename] + idx_frame += 1 + + filename_w_change_list = copy.deepcopy(filename_list) + for i_f in range(n_nonlin_w_change_total): + filename = make_minimal_image( + cmdgain=cmdgain_nonlin_wo_change[i_f], + exptime_sec=exptime_nonlin_wo_change[i_f], + frameid=idx_frame, + ) + filename_w_change_list += [filename] + idx_frame += 1 + + # Shuffle file order randomnly + random.shuffle(filename_wo_change_list) + random.shuffle(filename_w_change_list) + + # Create datasets + dataset_wo_change = data.Dataset(filename_wo_change_list) + dataset_w_change = data.Dataset(filename_w_change_list) + + # Delete temporary test FITS + for filepath in filename_wo_change_list: + os.remove(filepath) + for filepath in filename_w_change_list: + # Delete remaining non-linearity FITS + try: + os.remove(filepath) + except: + pass + +def test_kgain_sorting(): + """ + Apply the sorting algorithm to a dataset for K-gain and non-linearity + calibration including EM-gain calibration files in the set to obtain + the dataset needed for K-gain calibration and check the resulting + dataset is consistent with the input dataset. K-gain uses unity gain + frames only. No need to test both non-linearity subsets of data. 
+ """ + dataset_kgain = sorting.sort_pupilimg_frames(dataset_wo_change, cal_type='k-gain') + + # Checks + n_mean_frame = 0 + n_kgain_test = 0 + filename_kgain_list = [] + exptime_mean_frame_list = [] + exptime_kgain_list = [] + # This way there's no need to perform a sum check and identifies any issue + for idx_frame, frame in enumerate(dataset_kgain): + if frame.pri_hdr['OBSTYPE'] == 'MNFRAME': + n_mean_frame += 1 + exptime_mean_frame_list += [frame.ext_hdr['EXPTIME']] + elif frame.pri_hdr['OBSTYPE'] == 'KGAIN': + n_kgain_test += 1 + filename_kgain_list += [frame.filename] + exptime_kgain_list += [frame.ext_hdr['EXPTIME']] + else: + try: + raise Exception((f'Frame #{idx_frame}: Misidentified calibration' + + f"type in the calibration dataset. OBSTYPE={frame.pri_hdr['OBSTYPE']}")) + except: + raise Exception((f'Frame #{idx_frame}: Unidentified calibration', + 'type in the Kgain calibration dataset')) + + # Same number of files as expected + assert n_kgain_test == n_kgain_total + # Unique exposure time for the mean frame + assert len(set(exptime_mean_frame_list)) == 1 + # Expected exposure time for the mean frame + assert exptime_mean_frame_list[0] == EXPTIME_MEAN_FRAME + # Expected number of frames for the mean frame + assert n_mean_frame == NFRAMES_MEAN_FRAME + # Expected identical number of frames per exposure time in K-gain with + # only one repeated case at the end + kgain_unique, kgain_counts = np.unique(exptime_kgain_list, return_counts=True) + assert len(set(kgain_counts)) == 2 + assert min(kgain_counts) == NFRAMES_KGAIN + assert max(kgain_counts) == 2*min(kgain_counts) + # Needs ordering + idx_kgain_sort = np.argsort(filename_kgain_list) + # Expected exposure times for K-gain + exptime_kgain_arr = np.array(exptime_kgain_list)[idx_kgain_sort] + assert len(set(exptime_kgain_arr[-NFRAMES_KGAIN:])) == 1 + assert exptime_kgain_arr[-1] in exptime_kgain_arr[0:-NFRAMES_KGAIN] + +def test_nonlin_sorting_wo_change(): + """ + Apply the sorting algorithm to a dataset 
for K-gain and non-linearity + calibration including EM-gain calibration files in the set to obtain + the dataset needed for non-linearity calibration and check the + resulting dataset is consistent with the input dataset. This test has + identical exposure times among the different subsets of non-unity gain + used to calibrate non-linearity + """ + dataset_nonlin_wo_change = sorting.sort_pupilimg_frames(dataset_wo_change, cal_type='non-lin') + + # Checks + n_mean_frame = 0 + n_nonlin_test = 0 + filename_nonlin_list = [] + exptime_mean_frame_list = [] + exptime_nonlin_list = [] + cmdgain_nonlin_list = [] + # This way there's no need to perform a sum check and identifies any issue + for idx_frame, frame in enumerate(dataset_nonlin_wo_change): + if frame.pri_hdr['OBSTYPE'] == 'MNFRAME': + n_mean_frame += 1 + exptime_mean_frame_list += [frame.ext_hdr['EXPTIME']] + elif frame.pri_hdr['OBSTYPE'] == 'NONLIN': + n_nonlin_test += 1 + filename_nonlin_list += [frame.filename] + exptime_nonlin_list += [frame.ext_hdr['EXPTIME']] + cmdgain_nonlin_list += [frame.ext_hdr['CMDGAIN']] + # Testing only non-unity gain frames for Non-linearity + elif frame.pri_hdr['OBSTYPE'] == 'KGAIN': + pass + else: + try: + raise Exception((f'Frame #{idx_frame}: Misidentified calibration' + + f"type in the calibration dataset. 
OBSTYPE={frame.pri_hdr['OBSTYPE']}")) + except: + raise Exception((f'Frame #{idx_frame}: Unidentified calibration', + 'type in the Non-linearity calibration dataset')) + + # Same number of files as expected + assert n_nonlin_test == n_nonlin_wo_change_total + # Unique exposure time for the mean frame + assert len(set(exptime_mean_frame_list)) == 1 + # Expected exposure time for the mean frame + assert exptime_mean_frame_list[0] == EXPTIME_MEAN_FRAME + # Expected number of frames for the mean frame + assert n_mean_frame == NFRAMES_MEAN_FRAME + # Needs ordering + idx_nonlin_sort = np.argsort(filename_nonlin_list) + # Expected exposure times for Non-linearity + exptime_nonlin_arr = np.array(exptime_nonlin_list)[idx_nonlin_sort] + # Subset of unique non-unity EM gains + nonlin_em_gain_arr = np.unique(cmdgain_nonlin_list) + nonlin_em_gain_arr.sort() + assert np.all(nonlin_em_gain_arr == CMDGAIN_NONLIN) + n_exptime_nonlin = len(EXPTIME_NONLIN) + # Expected exposure times (this test keeps the same values for all subsets) + exptime_nonlin_arr = np.array(exptime_nonlin_list)[idx_nonlin_sort] + for idx_em, nonlin_em in enumerate(nonlin_em_gain_arr): + assert np.all(exptime_nonlin_arr[idx_em*n_exptime_nonlin:(idx_em+1)*n_exptime_nonlin] == EXPTIME_NONLIN) + assert (exptime_nonlin_arr[(idx_em+1)*n_exptime_nonlin-1] in + exptime_nonlin_arr[idx_em*n_exptime_nonlin:(idx_em+1)*n_exptime_nonlin-1]) + +def test_nonlin_sorting_w_change(): + """ + Apply the sorting algorithm to a dataset for K-gain and non-linearity + calibration including EM-gain calibration files in the set to obtain + the dataset needed for non-linearity calibration and check the + resulting dataset is consistent with the input dataset. 
This test has + different exposure times among the different subsets of non-unity gain + used to calibrate non-linearity + """ + dataset_nonlin_w_change = sorting.sort_pupilimg_frames(dataset_w_change, cal_type='non-lin') + + # Checks + n_mean_frame = 0 + n_nonlin_test = 0 + filename_nonlin_list = [] + exptime_mean_frame_list = [] + exptime_nonlin_list = [] + cmdgain_nonlin_list = [] + # This way there's no need to perform a sum check and identifies any issue + for idx_frame, frame in enumerate(dataset_nonlin_w_change): + if frame.pri_hdr['OBSTYPE'] == 'MNFRAME': + n_mean_frame += 1 + exptime_mean_frame_list += [frame.ext_hdr['EXPTIME']] + elif frame.pri_hdr['OBSTYPE'] == 'NONLIN': + n_nonlin_test += 1 + filename_nonlin_list += [frame.filename] + exptime_nonlin_list += [frame.ext_hdr['EXPTIME']] + cmdgain_nonlin_list += [frame.ext_hdr['CMDGAIN']] + # Testing only non-unity gain frames for Non-linearity + elif frame.pri_hdr['OBSTYPE'] == 'KGAIN': + pass + else: + try: + raise Exception((f'Frame #{idx_frame}: Misidentified calibration' + + f"type in the calibration dataset. 
OBSTYPE={frame.pri_hdr['OBSTYPE']}")) + except: + raise Exception((f'Frame #{idx_frame}: Unidentified calibration', + 'type in the Non-linearity calibration dataset')) + + # Same number of files as expected + assert n_nonlin_test == n_nonlin_wo_change_total + # Unique exposure time for the mean frame + assert len(set(exptime_mean_frame_list)) == 1 + # Expected exposure time for the mean frame + assert exptime_mean_frame_list[0] == EXPTIME_MEAN_FRAME + # Expected number of frames for the mean frame + assert n_mean_frame == NFRAMES_MEAN_FRAME + # Needs ordering + idx_nonlin_sort = np.argsort(filename_nonlin_list) + # Expected exposure times for Non-linearity + exptime_nonlin_arr = np.array(exptime_nonlin_list)[idx_nonlin_sort] + # Subset of unique non-unity EM gains + nonlin_em_gain_arr = np.unique(cmdgain_nonlin_list) + nonlin_em_gain_arr.sort() + assert np.all(nonlin_em_gain_arr == CMDGAIN_NONLIN) + n_exptime_nonlin = len(EXPTIME_NONLIN) + # Expected exposure times (this test changed the values for all subsets) + exptime_nonlin_arr = np.array(exptime_nonlin_list)[idx_nonlin_sort] + for idx_em, nonlin_em in enumerate(nonlin_em_gain_arr): + assert (exptime_nonlin_arr[(idx_em+1)*n_exptime_nonlin-1] in + exptime_nonlin_arr[idx_em*n_exptime_nonlin:(idx_em+1)*n_exptime_nonlin-1]) + +if __name__ == "__main__": + print('Running test_sort_pupilimg_sorting') + # Testing the sorting algorithm for K-gain calibration + test_kgain_sorting() + print('* K-gain tests passed') + + # Testing the sorting algorithm for non-linearity calibration + test_nonlin_sorting_wo_change() + print('* Non-linearity tests with identical exposure times among non-unity gains passed') + + test_nonlin_sorting_w_change() + print('* Non-linearity tests with different exposure times among non-unity gains passed') + diff --git a/tests/test_walker.py b/tests/test_walker.py index 822f2222..53ba5045 100644 --- a/tests/test_walker.py +++ b/tests/test_walker.py @@ -519,6 +519,37 @@ def test_jit_calibs(): 
corgidrp.jit_calib_id = old_setting + +def test_generate_multiple_recipes(): + """ + Tests that we can generate multiple recipes when passing in a dataset + """ + # create dirs + datadir = os.path.join(os.path.dirname(__file__), "simdata") + if not os.path.exists(datadir): + os.mkdir(datadir) + outputdir = os.path.join(os.path.dirname(__file__), "walker_output") + if not os.path.exists(outputdir): + os.mkdir(outputdir) + # Make a non-linearity correction calibration file + input_non_linearity_filename = "nonlin_table_TVAC.txt" + test_non_linearity_filename = input_non_linearity_filename.split(".")[0] + ".fits" + test_non_linearity_path = os.path.join(os.path.dirname(__file__), "test_data", test_non_linearity_filename) + + dataset = mocks.create_nonlinear_dataset(test_non_linearity_path, filedir=datadir) + # add vistype + for frame in dataset: + frame.pri_hdr['VISTYPE'] = "PUPILIMG" + dataset.save() + filelist = [frame.filepath for frame in dataset] + + recipes = walker.autogen_recipe(filelist, outputdir) + + assert len(recipes) == 2 + + + + if __name__ == "__main__":# test_autoreducing() test_auto_template_identification() @@ -526,6 +557,7 @@ def test_jit_calibs(): test_skip_missing_calib() test_skip_missing_optional_calib() test_jit_calibs() + test_generate_multiple_recipes()