diff --git a/docs/conf.py b/docs/conf.py index 891807ad..e420d667 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -51,6 +51,7 @@ # class signature) "numpydoc", "sphinx_design", + "sphinx_qt_documentation", ] autosummary_generate = True # Turn on sphinx.ext.autosummary diff --git a/environment.yml b/environment.yml index 697f85c6..feb99e09 100644 --- a/environment.yml +++ b/environment.yml @@ -17,6 +17,8 @@ dependencies: - sphinx - sphinx-automodapi - pydata-sphinx-theme + - lmfit # needed into TRR + - h5py # needed into TRR (should be removed to use I/O methods of containers) - pyqt # [linux] - pip: - zeo @@ -25,3 +27,4 @@ dependencies: - browser-cookie3 - pyqtgraph - pyqt6 # [osx and arm64] + - sphinx-qt-documentation diff --git a/pyproject.toml b/pyproject.toml index d5eb3148..4e3124e4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -27,7 +27,10 @@ dependencies = [ "scipy==1.11.4", "zodb", "zeo", - "pyqt6", + "lmfit", + "h5py", + 'pyqt5 ; platform_system == "Linux"', + 'pyqt6 ; platform_system != "Linux"', # for macOS "pyqtgraph", ] @@ -52,6 +55,7 @@ docs = [ "sphinx-autodoc-typehints", "sphinx-automodapi", "sphinx-design", + "sphinx-qt-documentation", "pydata_sphinx_theme", "numpydoc", "tomli; python_version < '3.11'" diff --git a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/README.md b/src/nectarchain/trr_test_suite/README.md similarity index 100% rename from src/nectarchain/user_scripts/dmousadi/TRR_scripts/README.md rename to src/nectarchain/trr_test_suite/README.md diff --git a/src/nectarchain/trr_test_suite/__init__.py b/src/nectarchain/trr_test_suite/__init__.py new file mode 100644 index 00000000..417f98f8 --- /dev/null +++ b/src/nectarchain/trr_test_suite/__init__.py @@ -0,0 +1,5 @@ +from .gui import TestRunner + +__all__ = [ + "TestRunner", +] diff --git a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/deadtime.py b/src/nectarchain/trr_test_suite/deadtime.py similarity index 86% rename from src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/deadtime.py rename to src/nectarchain/trr_test_suite/deadtime.py index 3c2413e4..c01a5c27 100644 --- a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/deadtime.py +++ b/src/nectarchain/trr_test_suite/deadtime.py @@ -9,23 +9,33 @@ import numpy as np from astropy import units as u from iminuit import Minuit -from tools_components import DeadtimeTestTool -from utils import ExponentialFitter, deadtime_labels, source_ids_deadtime + +from nectarchain.trr_test_suite.tools_components import DeadtimeTestTool +from nectarchain.trr_test_suite.utils import ( + ExponentialFitter, + deadtime_labels, + source_ids_deadtime, +) def get_args(): - """ - Parses command-line arguments for the deadtime test script. + """Parses command-line arguments for the deadtime test script. Returns: argparse.ArgumentParser: The parsed command-line arguments. """ parser = argparse.ArgumentParser( description="Deadtime tests B-TEL-1260 & B-TEL-1270. 
\n" - + "According to the nectarchain component interface, you have to set a NECTARCAMDATA environment variable in the folder where you have the data from your runs or where you want them to be downloaded.\n" - + "You have to give a list of runs (run numbers with spaces inbetween), a corresponding source list and an output directory to save the final plot.\n" - + "If the data is not in NECTARCAMDATA, the files will be downloaded through DIRAC.\n For the purposes of testing this script, default data is from the runs used for this test in the TRR document.\n" - + "You can optionally specify the number of events to be processed (default 1000).\n" + + "According to the nectarchain component interface, you have to set a\ + NECTARCAMDATA environment variable in the folder where you have the data\ + from your runs or where you want them to be downloaded.\n" + + "You have to give a list of runs (run numbers with spaces inbetween), a \ + corresponding source list and an output directory to save the final plot.\n" + + "If the data is not in NECTARCAMDATA, the files will be downloaded through \ + DIRAC.\n For the purposes of testing this script, default data is from the\ + runs used for this test in the TRR document.\n" + + "You can optionally specify the number of events to be processed \ + (default 1000).\n" ) parser.add_argument( "-r", @@ -42,7 +52,8 @@ def get_args(): type=int, choices=[0, 1, 2], nargs="+", - help="List of corresponding source for each run: 0 for random generator, 1 for nsb source, 2 for laser", + help="List of corresponding source for each run: 0 for random generator,\ + 1 for nsb source, 2 for laser", required=False, default=source_ids_deadtime, ) @@ -70,15 +81,21 @@ def get_args(): def main(): - """ - Runs the deadtime test script, which performs deadtime tests B-TEL-1260 and B-TEL-1270. + """Runs the deadtime test script, which performs deadtime tests B-TEL-1260 and + B-TEL-1270. - The script takes command-line arguments to specify the list of runs, corresponding sources, number of events to process, and output directory. It then processes the data for each run, performs an exponential fit to the deadtime distribution, and generates two plots: + The script takes command-line arguments to specify the list of runs, corresponding\ + sources, number of events to process, and output directory. It then processes\ + the data for each run, performs an exponential fit to the deadtime\ + distribution, and generates two plots: - 1. A plot of deadtime percentage vs. collected trigger rate, with the CTA requirement indicated. - 2. A plot of the rate from the fit vs. the collected trigger rate, with the relative difference shown in the bottom panel. + 1. A plot of deadtime percentage vs. collected trigger rate, with the CTA\ + requirement indicated. + 2. A plot of the rate from the fit vs. the collected trigger rate, with the\ + relative difference shown in the bottom panel. - The script also saves the generated plots to the specified output directory, and optionally saves them to a temporary output directory for use in a GUI. + The script also saves the generated plots to the specified output directory, and\ + optionally saves them to a temporary output directory for use in a GUI. """ parser = get_args() @@ -189,7 +206,8 @@ def main(): m.limits["deadtime"] = ( 0.6e-6, 1.1e-6, - ) # Put some tigh constrain as the fit will be in trouble when it expect 0. and measured something instead. + ) # Put some tigh constrain as the fit will be in trouble when it expect 0. 
and + # measured something instead. m.print_level = 2 @@ -202,16 +220,19 @@ def main(): # print(fitted_params_err) print( - f"Dead-Time is {1.e6*fitted_params[1]:.3f} +- {1.e6*fitted_params_err[1]:.3f} µs" + f"Dead-Time is {1.e6*fitted_params[1]:.3f} +- " + f"{1.e6*fitted_params_err[1]:.3f} µs" ) print( - f"Rate is {1./fitted_params[2]:.2f} +- {fitted_params_err[2]/(fitted_params[2]**2):.2f} Hz" + f"Rate is {1./fitted_params[2]:.2f} +-" + f"{fitted_params_err[2]/(fitted_params[2]**2):.2f} Hz" ) print(f"Expected run duration is {fitted_params[0]*fitted_params[2]:.2f} s") fitted_rate.append(1.0 / fitted_params[2]) - # plt.savefig(figurepath + 'deadtime_exponential_fit_nsb_run{}_newfit_cutoff.png'.format(run)) + # plt.savefig(figurepath + 'deadtime_exponential_fit_nsb_run{}_newfit + # _cutoff.png'.format(run)) y = data_content y_fit = fitter.expected_distribution(fitted_params) @@ -236,13 +257,13 @@ def main(): parameter_R2_new_list.append(r2) - deadtime_from_fit = parameter_tau_new_list - deadtime_from_fit_err = parameter_tau_err_new_list + # deadtime_from_fit = parameter_tau_new_list + # deadtime_from_fit_err = parameter_tau_err_new_list lambda_from_fit = parameter_lambda_new_list lambda_from_fit_err = parameter_lambda_err_new_list - A2_from_fit = parameter_A2_new_list - A2_from_fit_err = parameter_A2_err_new_list - R2_from_fit = parameter_R2_new_list + # A2_from_fit = parameter_A2_new_list + # A2_from_fit_err = parameter_A2_err_new_list + # R2_from_fit = parameter_R2_new_list ####################################### # PLOT @@ -260,9 +281,9 @@ def main(): ids = np.array(ids) runlist = np.array(runlist) - ratio_list = [] - collected_rate = [] - err = [] + # ratio_list = [] + # collected_rate = [] + # err = [] for source in range(0, 3): # runl = np.where(ids==source)[0] @@ -399,7 +420,8 @@ def main(): ax1.errorbar( collected_trigger_rate[runl] / 1000, rate[runl], - # xerr=((df_mean_nsb[df_mean_nsb['Run']==run]['Collected_trigger_rate[Hz]_err']))/1000, + # xerr=((df_mean_nsb[df_mean_nsb['Run']==run]['Collected_trigger + # _rate[Hz]_err']))/1000, yerr=rate_err[runl], alpha=0.9, ls=" ", @@ -407,7 +429,8 @@ def main(): color=labels[source]["color"], label=labels[source]["source"], ) - # label = 'Run {} ({} V)'.format(run, df_mean_rg[df_mean_rg['Run']==run]['Voltage[V]'].values[0])) + # label = 'Run {} ({} V)'.format(run, df_mean_rg[df_mean_rg['Run']= + # =run]['Voltage[V]'].values[0])) ax1.legend(frameon=False, prop={"size": 10}, loc="upper left", ncol=1) @@ -424,7 +447,7 @@ def main(): main() -##################################PREVIOUS############################### +# ##################################PREVIOUS############################### # collected_rate = [] @@ -445,8 +468,10 @@ def main(): # for i, run in enumerate(runlist): # deadtime_run, deadtime_bin_run, deadtime_err_run, deadtime_bin_length_run, \ -# total_delta_t_for_busy_time, parameter_A_new, parameter_R_new, parameter_A_err_new, parameter_R_err_new, \ -# first_bin_length, tot_nr_events_histo = deadtime_and_expo_fit(time_tot[i],deadtime_us[i], run) +# total_delta_t_for_busy_time, parameter_A_new, parameter_R_new, parameter_A_err_ +# new, parameter_R_err_new, \ +# first_bin_length, tot_nr_events_histo = deadtime_and_expo_fit(time_tot[i],deadt +# ime_us[i], run) # total_delta_t_for_busy_time_list.append(total_delta_t_for_busy_time) # parameter_A_new_list.append(parameter_A_new) # parameter_R_new_list.append(parameter_R_new) @@ -468,7 +493,8 @@ def main(): # rate_err = (np.array(parameter_R_err_new_list) * 1 / 
u.us).to(u.kHz).to_value() # A_from_fit = (parameter_A_new_list) # A_from_fit_err = (parameter_A_err_new_list) -# ucts_busy_rate = (np.array(busy_counter[:,-1]) / (np.array(time_tot) * u.s).to(u.s)).to( +# ucts_busy_rate = (np.array(busy_counter[:,-1]) / (np.array(time_tot) * u.s).to(u.s)) +# .to( # u.kHz).value # nr_events_from_histo = (tot_nr_events_histo) # first_bin_delta_t = first_bin_length @@ -478,7 +504,7 @@ def main(): # deadtime_average_err_nsb = np.sqrt(1 / (np.sum(1 / deadtime_bin_length ** 2))) -# ####################################################################################### +# ###################################################################################### # #B-TEL-1260 @@ -502,7 +528,8 @@ def main(): # ratio = rate[i]/freq # ratio_list.append(np.array(ratio)*100) -# ratio_err = np.sqrt((rate_err[i]/freq)**2 + (freq_err*rate[i]/(freq**2))) +# ratio_err = np.sqrt((rate_err[i]/freq)**2 + (freq_err*rate[i]/ +# (freq**2))) # err.append(ratio_err*100) @@ -512,7 +539,8 @@ def main(): # X_sorted = [x for y, x in sorted(zip(Y, X))] # err_sorted = [err for y,err in sorted(zip(Y,err))] -# plt.errorbar(sorted(Y), X_sorted, yerr = err_sorted, alpha=0.6, ls='-', marker='o',color=labels[source]['color'], label = labels[source]['source']) +# plt.errorbar(sorted(Y), X_sorted, yerr = err_sorted, alpha=0.6, ls='-', +# marker='o',color=labels[source]['color'], label = labels[source]['source']) # plt.xlabel('Collected Trigger Rate [kHz]') # plt.ylabel(r'Deadtime [%]') @@ -564,7 +592,8 @@ def main(): # ax1.plot(x, x, color='gray', ls='--', alpha=0.5) # ax2.plot(x, np.zeros(len(x)), color='gray', ls='--', alpha=0.5) -# ax2.fill_between(x, np.ones(len(x))*(-10), np.ones(len(x))*(10), color='gray', alpha=0.1) +# ax2.fill_between(x, np.ones(len(x))*(-10), np.ones(len(x))*(10), color='gray', +# alpha=0.1) # ax2.set_xlabel('Collected Trigger Rate [kHz]') # ax1.set_ylabel(r'Rate from fit [kHz]') @@ -581,11 +610,14 @@ def main(): # #print(collected_triger_rate[runl]) # ax1.errorbar(collected_triger_rate[runl]/1000, # rate[runl], -# #xerr=((df_mean_nsb[df_mean_nsb['Run']==run]['Collected_trigger_rate[Hz]_err']))/1000, +# #xerr=((df_mean_nsb[df_mean_nsb['Run']==run] +# ['Collected_trigger_rate[Hz]_err']))/1000, # yerr=rate_err[runl], # alpha=0.9, -# ls=' ', marker='o', color=labels[source]['color'], label = labels[source]['source']) -# # label = 'Run {} ({} V)'.format(run, df_mean_rg[df_mean_rg['Run']==run]['Voltage[V]'].values[0])) +# ls=' ', marker='o', color=labels[source]['color'], +# label = labels[source]['source']) +# # label = 'Run {} ({} V)'.format(run, df_mean_rg[df_mean_rg['Run']== +# run]['Voltage[V]'].values[0])) # ax1.legend(frameon=False, prop={'size':10}, # loc="upper left", ncol=1) diff --git a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/gui.py b/src/nectarchain/trr_test_suite/gui.py similarity index 88% rename from src/nectarchain/user_scripts/dmousadi/TRR_scripts/gui.py rename to src/nectarchain/trr_test_suite/gui.py index 686a45a0..36789451 100644 --- a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/gui.py +++ b/src/nectarchain/trr_test_suite/gui.py @@ -1,41 +1,60 @@ -""" -The `TestRunner` class is a GUI application that allows the user to run various tests and display the results. - -The class provides the following functionality: -- Allows the user to select a test from a dropdown menu. -- Dynamically generates input fields based on the selected test. -- Runs the selected test and displays the output in a text box. 
-- Displays the test results in a plot canvas, with navigation buttons to switch between multiple plots. -- Provides a dark-themed UI with custom styling for various UI elements. - -The class uses the PyQt5 library for the GUI implementation and the Matplotlib library for plotting the test results. -""" - import argparse import os import pickle import sys import tempfile -from matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.figure import Figure -from PyQt5.QtCore import QProcess, QTimer -from PyQt5.QtWidgets import ( - QApplication, - QComboBox, - QGroupBox, - QHBoxLayout, - QLabel, - QLineEdit, - QMessageBox, - QPushButton, - QSizePolicy, - QSpacerItem, - QTextEdit, - QVBoxLayout, - QWidget, - QWidgetItem, + +try: + from matplotlib.backends.backend_qt import NavigationToolbar2QT as NavigationToolbar + from PyQt6.QtCore import QProcess, QTimer + from PyQt6.QtWidgets import ( + QApplication, + QComboBox, + QGroupBox, + QHBoxLayout, + QLabel, + QLineEdit, + QMessageBox, + QPushButton, + QSizePolicy, + QSpacerItem, + QTextEdit, + QVBoxLayout, + QWidget, + QWidgetItem, + ) +except ImportError: + from PyQt5.QtCore import QProcess, QTimer + from PyQt5.QtWidgets import ( + QApplication, + QComboBox, + QGroupBox, + QHBoxLayout, + QLabel, + QLineEdit, + QMessageBox, + QPushButton, + QSizePolicy, + QSpacerItem, + QTextEdit, + QVBoxLayout, + QWidget, + QWidgetItem, + ) + from matplotlib.backends.backend_qt5 import ( + NavigationToolbar2QT as NavigationToolbar, + ) + +import nectarchain.trr_test_suite.deadtime as deadtime +import nectarchain.trr_test_suite.linearity as linearity +import nectarchain.trr_test_suite.pedestal as pedestal +import nectarchain.trr_test_suite.pix_tim_uncertainty as pix_tim_uncertainty +import nectarchain.trr_test_suite.trigger_timing as trigger_timing +from nectarchain.trr_test_suite import ( + pix_couple_tim_uncertainty as pix_couple_tim_uncertainty, ) # Ensure the src directory is in sys.path @@ -44,15 +63,22 @@ sys.path.append(test_dir) # Import test modules -import deadtime -import linearity -import pedestal -import pix_couple_tim_uncertainty -import pix_tim_uncertainty -import trigger_timing class TestRunner(QWidget): + """The ``TestRunner`` class is a GUI application that allows the\ + user to run various tests and display the results. + The class provides the following functionality: + - Allows the user to select a test from a dropdown menu. + - Dynamically generates input fields based on the selected test. + - Runs the selected test and displays the output in a text box. + - Displays the test results in a plot canvas, with navigation buttons\ + to switch between multiple plots. + - Provides a dark-themed UI with custom styling for various UI elements. + The class uses the PyQt5 library for the GUI implementation and the Matplotlib\ + library for plotting the test results. 
+ """ + test_modules = { "Linearity Test": linearity, "Deadtime Test": deadtime, @@ -72,7 +98,8 @@ def __init__(self): self.init_ui() def init_ui(self): - # Main layout: vertical, dividing into two sections (top for controls/plot, bottom for output) + # Main layout: vertical, dividing into two sections (top for controls/plot + # , bottom for output) main_layout = QVBoxLayout() self.setStyleSheet( @@ -116,13 +143,14 @@ def init_ui(self): border-radius: 5px; /* Rounded corners */ } QPushButton:disabled { - background-color: rgba(76, 175, 80, 0.5); /* Transparent green when disabled */ + background-color: rgba(76, 175, 80, 0.5); /* Transparent green when\ + disabled */ color: rgba(255, 255, 255, 0.5); /* Light text when disabled */ } QPushButton:hover { background-color: #45a049; /* Darker green on hover */ } - """ + """ ) # Horizontal layout for test options (left) and plot canvas (right) @@ -175,7 +203,9 @@ def init_ui(self): # Add a stretchable spacer to push the canvas to the right top_layout.addSpacerItem( - QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum) + QSpacerItem( + 40, 20, QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Minimum + ) ) # Create a vertical layout for the plot container @@ -321,11 +351,13 @@ def update_parameters(self): help_button.setToolTip(param_info["help"]) # # Use lambda to capture the current param's help text - # help_button.clicked.connect(lambda _, p=param_info["help"]: self.show_help(p)) + # help_button.clicked.connect(lambda _, p=param_info["help"]: + # self.show_help(p)) # Add the help button to the layout (next to the label) param_layout.addWidget(help_button) - param_layout.addStretch() # Add stretch to push the help button to the right + param_layout.addStretch() # Add stretch to push the help button to + # the right # Add the horizontal layout (label + help button) to the main layout self.param_layout.addLayout(param_layout) @@ -396,7 +428,9 @@ def run_test(self): self.output_text_edit.clear() self.process = QProcess(self) - self.process.setProcessChannelMode(QProcess.MergedChannels) + self.process.setProcessChannelMode( + QProcess.ProcessChannelMode.MergedChannels + ) self.process.readyReadStandardOutput.connect(self.read_process_output) self.process.finished.connect(self.process_finished) @@ -531,4 +565,4 @@ def cleanup_tempdir(self): if __name__ == "__main__": app = QApplication(sys.argv) ex = TestRunner() - sys.exit(app.exec_()) + sys.exit(app.exec()) diff --git a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/linearity.py b/src/nectarchain/trr_test_suite/linearity.py similarity index 87% rename from src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/linearity.py rename to src/nectarchain/trr_test_suite/linearity.py index ea62de75..ab4a6cd5 100644 --- a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/linearity.py +++ b/src/nectarchain/trr_test_suite/linearity.py @@ -8,8 +8,9 @@ import matplotlib.pyplot as plt import numpy as np from lmfit.models import Model -from tools_components import LinearityTestTool -from utils import ( + +from nectarchain.trr_test_suite.tools_components import LinearityTestTool +from nectarchain.trr_test_suite.utils import ( err_ratio, err_sum, linear_fit_function, @@ -19,8 +20,7 @@ def get_args(): - """ - Parses command-line arguments for the linearity test script. + """Parses command-line arguments for the linearity test script. Returns: argparse.ArgumentParser: The parsed command-line arguments. 
@@ -28,10 +28,18 @@ def get_args(): parser = argparse.ArgumentParser( description="Linearity test B-TEL-1390 & Intensity resolution B-TEL-1010. \n" - + "According to the nectarchain component interface, you have to set a NECTARCAMDATA environment variable in the folder where you have the data from your runs or where you want them to be downloaded.\n" - + "You have to give a list of runs (run numbers with spaces inbetween), a corresponding transmission list and an output directory to save the final plot.\n" - + "If the data is not in NECTARCAMDATA, the files will be downloaded through DIRAC.\n For the purposes of testing this script, default data is from the runs used for this test in the TRR document.\n" - + "You can optionally specify the number of events to be processed (default 500) and the number of pixels used (default 70).\n" + + "According to the nectarchain component interface, \ + you have to set a NECTARCAMDATA environment variable\ + in the folder where you have the data from your runs\ + or where you want them to be downloaded.\n" + + "You have to give a list of runs (run numbers with spaces inbetween), a\ + corresponding transmission list and an output directory to save the \ + final plot.\n" + + "If the data is not in NECTARCAMDATA, the files will be downloaded through\ + DIRAC.\n For the purposes of testing this script, default data is from the\ + runs used for this test in the TRR document.\n" + + "You can optionally specify the number of events to be processed\ + (default 500) and the number of pixels used (default 70).\n" ) parser.add_argument( "-r", @@ -76,18 +84,30 @@ def get_args(): def main(): """ - The `main()` function is the entry point of the linearity test script. It parses the command-line arguments, processes the specified runs, and generates plots to visualize the linearity and charge resolution of the detector. The function performs the following key steps: - - 1. Parses the command-line arguments using the `get_args()` function, which sets up the argument parser and handles the input parameters. - 2. Iterates through the specified run list, processing each run using the `LinearityTestTool` class. This tool initializes, sets up, starts, and finishes the processing for each run, returning the relevant output data. - 3. Normalizes the high-gain and low-gain charge values using the charge value at 0.01 transmission. + The `main()` function is the entry point of the linearity test script. It parses \ + the command-line arguments, processes the specified runs, and generates plots\ + to visualize the linearity and charge resolution of the detector. The\ + function performs the following key steps: + 1. Parses the command-line arguments using the `get_args()` function, which sets up\ + the argument parser and handles the input parameters. + 2. Iterates through the specified run list, processing each run using the\ + `LinearityTestTool` class. This tool initializes, sets up, starts, and finishes\ + the processing for each run, returning the relevant output data. + 3. Normalizes the high-gain and low-gain charge values using the charge value at\ + 0.01 transmission. 4. Generates three subplots: - - The first subplot shows the estimated charge vs. the true charge, with the fitted linear function for both high-gain and low-gain channels. - - The second subplot shows the residuals between the estimated and true charge, as a percentage. - - The third subplot shows the ratio of high-gain to low-gain charge, with a fitted linear function. - 5. 
Saves the generated plots to the specified output directory, and optionally saves temporary plot files for a GUI. - 6. Generates an additional plot to visualize the charge resolution, including the statistical limit. - 7. Saves the charge resolution plot to the specified output directory, and optionally saves a temporary plot file for a GUI. + - The first subplot shows the estimated charge vs. the true charge, with the fitted\ + linear function for both high-gain and low-gain channels. + - The second subplot shows the residuals between the estimated and true charge, as\ + a percentage. + - The third subplot shows the ratio of high-gain to low-gain charge, with a fitted\ + linear function. + 5. Saves the generated plots to the specified output directory, and optionally\ + saves temporary plot files for a GUI. + 6. Generates an additional plot to visualize the charge resolution, including the\ + statistical limit. + 7. Saves the charge resolution plot to the specified output directory, and\ + optionally saves a temporary plot file for a GUI. """ parser = get_args() args = parser.parse_args() @@ -178,7 +198,7 @@ def main(): axs[2].axvspan(10, 1000, alpha=0.2, color="orange") axs[2].set_xlabel("Illumination charge [p.e.]") - for channel, (channel_charge, channel_std, name) in enumerate( + for _, (channel_charge, channel_std, name) in enumerate( zip( [charge_norm_hg, charge_norm_lg], [std_norm_hg, std_norm_lg], diff --git a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/pedestal.py b/src/nectarchain/trr_test_suite/pedestal.py similarity index 82% rename from src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/pedestal.py rename to src/nectarchain/trr_test_suite/pedestal.py index b20d2fdb..58abc9eb 100644 --- a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/pedestal.py +++ b/src/nectarchain/trr_test_suite/pedestal.py @@ -7,13 +7,13 @@ import matplotlib.pyplot as plt import numpy as np -from tools_components import PedestalTool -from utils import adc_to_pe, pe2photons + +from nectarchain.trr_test_suite.tools_components import PedestalTool +from nectarchain.trr_test_suite.utils import adc_to_pe, pe2photons def get_args(): - """ - Parses command-line arguments for the linearity test script. + """Parses command-line arguments for the linearity test script. Returns: argparse.ArgumentParser: The parsed command-line arguments. 
@@ -21,10 +21,17 @@ def get_args(): parser = argparse.ArgumentParser( description="Pedestal substraction test B-TEL-1370.\n" - + "According to the nectarchain component interface, you have to set a NECTARCAMDATA environment variable in the folder where you have the data from your runs or where you want them to be downloaded.\n" - + "You have to give a list of runs (run numbers with spaces inbetween) and an output directory to save the final plot.\n" - + "If the data is not in NECTARCAMDATA, the files will be downloaded through DIRAC.\n For the purposes of testing this script, default data is from the runs used for this test in the TRR document.\n" - + "You can optionally specify the number of events to be processed (default 1200).\n" + + "According to the nectarchain component interface, you have to set\ + a NECTARCAMDATA environment variable in the folder where you have\ + the data from your runs or where you want them to be downloaded.\n" + + "You have to give a list of runs (run numbers with spaces inbetwee\ + n) and an output directory to save the final plot.\n" + + "If the data is not in NECTARCAMDATA, the files will be\ + downloaded through DIRAC.\n For the purposes of testing this script,\ + default data is from the runs used for this test in the\ + TRR document.\n" + + "You can optionally specify the number of events to be processed\ + (default 1200).\n" ) parser.add_argument( "-r", @@ -39,7 +46,8 @@ def get_args(): "-e", "--evts", type=int, - help="Number of events to process from each run. Default is 1200. 4000 or more gives best results but takes some time", + help="Number of events to process from each run. Default is 1200. 4000 or more\ + gives best results but takes some time", required=False, default=10, ) @@ -59,13 +67,15 @@ def get_args(): def main(): - """ - The main function that runs the pedestal subtraction test. It parses command-line arguments, processes the specified runs, and generates two plots: + """The main function that runs the pedestal subtraction test. It parses command-line + arguments, processes the specified runs, and generates two plots: 1. A 2D heatmap of the pedestal RMS for all events and pixels. - 2. A line plot of the mean pedestal RMS for each pixel, with the CTA requirement range highlighted. + 2. A line plot of the mean pedestal RMS for each pixel, with the CTA requirement\ + range highlighted. - The function also saves the generated plots to the specified output directory, and optionally saves the first plot to a temporary output file. + The function also saves the generated plots to the specified output directory,\ + and optionally saves the first plot to a temporary output file. """ parser = get_args() diff --git a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/pix_couple_tim_uncertainty.py b/src/nectarchain/trr_test_suite/pix_couple_tim_uncertainty.py similarity index 70% rename from src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/pix_couple_tim_uncertainty.py rename to src/nectarchain/trr_test_suite/pix_couple_tim_uncertainty.py index f57f145f..2138ae06 100644 --- a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/pix_couple_tim_uncertainty.py +++ b/src/nectarchain/trr_test_suite/pix_couple_tim_uncertainty.py @@ -5,29 +5,38 @@ import matplotlib.pyplot as plt import numpy as np -from tools_components import ToMPairsTool + +from nectarchain.trr_test_suite.tools_components import ToMPairsTool def get_args(): - """ - Parses command-line arguments for the pix_couple_tim_uncertainty_test.py script. 
+ """Parses command-line arguments for the pix_couple_tim_uncertainty_test.py script. Returns: argparse.ArgumentParser: The parsed command-line arguments. """ parser = argparse.ArgumentParser( - description="Time resolution (timing uncertainty between couples of pixels) test B-TEL-1030.\n" - + "According to the nectarchain component interface, you have to set a NECTARCAMDATA environment variable in the folder where you have the data from your runs or where you want them to be downloaded.\n" - + "You have to give a list of runs (run numbers with spaces inbetween) and an output directory to save the final plot.\n" - + "If the data is not in NECTARCAMDATA, the files will be downloaded through DIRAC.\n For the purposes of testing this script, default data is from the runs used for this test in the TRR document.\n" - + "You can optionally specify the number of events to be processed (default 1000). Takes a lot of time.\n" + description="Time resolution (timing uncertainty between couples of pixels)\ + test B-TEL-1030.\n" + + "According to the nectarchain component interface, you have to set a\ + NECTARCAMDATA\ + environment variable in the folder where you have the data from your runs\ + or where you want them to be downloaded.\n" + + "You have to give a list of runs (run numbers with spaces inbetween) and\ + an output directory to save the final plot.\n" + + "If the data is not in NECTARCAMDATA, the files will be downloaded through\ + DIRAC.\n For the purposes of testing this script, default data is from the\ + runs used for this test in the TRR document.\n" + + "You can optionally specify the number of events to be processed (default\ + 1000). Takes a lot of time.\n" ) parser.add_argument( "-r", "--runlist", type=int, nargs="+", - help="List of runs (numbers separated by space). You can put just one run, default 3292", + help="List of runs (numbers separated by space). You can put just one run,\ + default 3292", required=False, default=[3292], ) @@ -35,7 +44,8 @@ def get_args(): "-e", "--evts", type=int, - help="Number of events to process from each run. Default is 100. 1000 or more gives best results but takes some time", + help="Number of events to process from each run. Default is 100. 1000 or\ + more gives best results but takes some time", required=False, default=100, ) @@ -45,7 +55,9 @@ def get_args(): type=str, help=".csv file with pmt transit time corrections", required=False, - default="../transit_time/hv_pmt_tom_correction_laser_measurement_per_pixel_fit_sqrt_hv_newmethod.csv", + default="../transit_time/" + "hv_pmt_tom_correction_laser_measurement_per_pixel_fit" + "sqrt_hv_newmethod.csv", ) parser.add_argument( "-o", @@ -63,20 +75,26 @@ def get_args(): def main(): - """ - Generates a plot of the RMS of the time-of-maximum (TOM) difference for pairs of pixels, with a visualization of the CTA requirement. + """Generates a plot of the RMS of the time-of-maximum (TOM) difference for pairs of + pixels, with a visualization of the CTA requirement. - The script processes a list of runs, calculates the TOM difference with and without transit time corrections, and plots the distribution of the RMS of the corrected TOM differences. The CTA requirement of 2 ns RMS is visualized on the plot. + The script processes a list of runs, calculates the TOM difference with and without + transit time corrections, and plots the distribution of the RMS of the corrected TOM + differences. The CTA requirement of 2 ns RMS is visualized on the plot. 
- The script takes several command-line arguments, including the list of runs to process, the number of events to process per run, the path to a CSV file with PMT transit time corrections, and the output directory for the plot. + The script takes several command-line arguments, including the list of runs to + process, the number of events to process per run, the path to a CSV file with PMT + transit time corrections, and the output directory for the plot. - If a temporary output directory is specified, the plot is also saved to a pickle file in that directory for the gui to use. + If a temporary output directory is specified, the plot is also saved to a pickle + file in that directory for the gui to use. """ parser = get_args() args = parser.parse_args() - tt_path = "/Users/dm277349/nectarchain_data/transit_time/hv_pmt_tom_correction_laser_measurement_per_pixel_fit_sqrt_hv_newmethod.csv" + tt_path = "/Users/dm277349/nectarchain_data/transit_time/\ + hv_pmt_tom_correction_laser_measurement_per_pixel_fit_sqrt_hv_newmethod.csv" runlist = args.runlist nevents = args.evts @@ -153,7 +171,7 @@ def main(): arrowprops=dict(color="C4", alpha=0.7, lw=3, arrowstyle="->"), ) - plt.xlabel("RMS of $\Delta t_{\mathrm{TOM}}$ for pairs of pixels [ns]") + plt.xlabel(r"RMS of $\Delta t_{\mathrm{TOM}}$ for pairs of pixels [ns]") plt.ylabel("Normalized entries") plt.gcf() diff --git a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/pix_tim_uncertainty.py b/src/nectarchain/trr_test_suite/pix_tim_uncertainty.py similarity index 77% rename from src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/pix_tim_uncertainty.py rename to src/nectarchain/trr_test_suite/pix_tim_uncertainty.py index 2c26575b..53c644c1 100644 --- a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/pix_tim_uncertainty.py +++ b/src/nectarchain/trr_test_suite/pix_tim_uncertainty.py @@ -7,23 +7,29 @@ import matplotlib.pyplot as plt import numpy as np -from tools_components import TimingResolutionTestTool -from utils import pe2photons, photons2pe + +from nectarchain.trr_test_suite.tools_components import TimingResolutionTestTool +from nectarchain.trr_test_suite.utils import pe2photons, photons2pe def get_args(): - """ - Parses command-line arguments for the pixel timing uncertainty test script. + """Parses command-line arguments for the pixel timing uncertainty test script. Returns: argparse.ArgumentParser: The parsed command-line arguments. 
""" parser = argparse.ArgumentParser( description="Systematic pixel timing uncertainty test B-TEL-1380.\n" - + "According to the nectarchain component interface, you have to set a NECTARCAMDATA environment variable in the folder where you have the data from your runs or where you want them to be downloaded.\n" - + "You have to give a list of runs (run numbers with spaces inbetween) and an output directory to save the final plot.\n" - + "If the data is not in NECTARCAMDATA, the files will be downloaded through DIRAC.\n For the purposes of testing this script, default data is from the runs used for this test in the TRR document.\n" - + "You can optionally specify the number of events to be processed (default 1200) and the number of pixels used (default 70).\n" + + "According to the nectarchain component interface, you have to set a\ + NECTARCAMDATA environment variable in the folder where you have the data\ + from your runs or where you want them to be downloaded.\n" + + "You have to give a list of runs (run numbers with spaces inbetween)\ + and an output directory to save the final plot.\n" + + "If the data is not in NECTARCAMDATA, the files will be downloaded through\ + DIRAC.\n For the purposes of testing this script, default data is from the\ + runs used for this test in the TRR document.\n" + + "You can optionally specify the number of events to be processed (default\ + 1200) and the number of pixels used (default 70).\n" ) parser.add_argument( "-r", @@ -42,7 +48,8 @@ def get_args(): required=False, default=100, ) - # parser.add_argument('-p','--pixels', type = int, help='Number of pixels used. Default is 70', required=False, default=70) + # parser.add_argument('-p','--pixels', type = int, help='Number of pixels used. + # Default is 70', required=False, default=70) parser.add_argument( "-o", "--output", @@ -58,12 +65,14 @@ def get_args(): def main(): - """ - Processes the pixel timing uncertainty test data and generates a plot. + """Processes the pixel timing uncertainty test data and generates a plot. - The function processes the data from the specified list of runs, calculates the weighted mean RMS and RMS error, and generates a plot of the results. The plot is saved to the specified output directory. + The function processes the data from the specified list of runs, calculates the + weighted mean RMS and RMS error, and generates a plot of the results. The plot is + saved to the specified output directory. - If a temporary output directory is provided, the plot is also saved to a pickle file in that directory for the gui to use. + If a temporary output directory is provided, the plot is also saved to a pickle file + in that directory for the gui to use. 
""" parser = get_args() @@ -129,7 +138,8 @@ def main(): rms_no_fit_weighted_err = [] for run in range(len(runlist)): - # rms_mu_weighted.append(np.sum(rms_mu[run]*weights_mu_pix[run])/np.sum(weights_mu_pix[run])) + # rms_mu_weighted.append(np.sum(rms_mu[run]*weights_mu_pix[run])/ + # np.sum(weights_mu_pix[run])) # rms_mu_weighted_err.append(np.sqrt(1/np.sum(weights_mu_pix[run]))) rms_no_fit_weighted.append( np.nansum(rms_no_fit[run] * weights_no_fit_pix[run]) @@ -227,15 +237,33 @@ def main(): # from test_tools_components import TimingResolutionTestTool # import argparse -# parser = argparse.ArgumentParser(description='Systematic pixel timing uncertainty test B-TEL-1380.\n' -# +'According to the nectarchain component interface, you have to set a NECTARCAMDATA environment variable in the folder where you have the data from your runs or where you want them to be downloaded.\n' -# +'You have to give a list of runs (run numbers with spaces inbetween) and an output directory to save the final plot.\n' -# +'If the data is not in NECTARCAMDATA, the files will be downloaded through DIRAC.\n For the purposes of testing this script, default data is from the runs used for this test in the TRR document.\n' -# +'You can optionally specify the number of events to be processed (default 1200) and the number of pixels used (default 70).\n') -# parser.add_argument('-r','--runlist', type = int, nargs='+', help='List of runs (numbers separated by space)', required=False) -# parser.add_argument('-e','--evts', type = int, help='Number of events to process from each run. Default is 1200. 4000 or more gives best results but takes some time', required=False, default=1200) -# #parser.add_argument('-p','--pixels', type = int, help='Number of pixels used. Default is 70', required=False, default=70) -# parser.add_argument('-o','--output', type=str, help='Output directory. If none, plot will be saved in current directory', required=False, default='./') +# parser = argparse.ArgumentParser(description='Sy +# stematic pixel timing uncertainty test B-TEL-1380.\n' +# +'According to +# the nectarchain component interface, you have to set a NECTARCAMDATA environment +# variable in the folder where you have the data from your runs or where you want them +# to be downloaded.\n' +# +'You have to g +# ive a list of runs (run numbers with spaces inbetween) and an output directory to save +# the final plot.\n' +# +'If the data i +# s not in NECTARCAMDATA, the files will be downloaded through DIRAC.\n For the purposes +# of testing this script, default data is from the runs used for this test in the TRR +# document.\n' +# +'You can optio +# nally specify the number of events to be processed (default 1200) and the +# number of +# pixels used (default 70).\n') +# parser.add_argument('-r','--runlist', type = int +# , nargs='+', help='List of runs (numbers separated by space)', required=False) +# parser.add_argument('-e','--evts', type = int, h +# elp='Number of events to process from each run. Default is 1200. 4000 or more gives +# best results but takes some time', required=False, default=1200) +# #parser.add_argument('-p','--pixels', type = int +# , help='Number of pixels used. Default is 70', required=False, default=70) +# parser.add_argument('-o','--output', type=str, h +# elp='Output directory. 
If none, plot will be saved in current directory', +# required=False, default='./') # args = parser.parse_args() @@ -257,7 +285,8 @@ def main(): # for run in runlist: # print("PROCESSING RUN {}".format(run)) # tool = TimingResolutionTestTool( -# progress_bar=True, run_number=run, max_events=nevents, log_level=20, window_width=16, overwrite=True +# progress_bar=True, run_number=run, max_events=nevents, log_level=20, +# window_width=16, overwrite=True # ) # tool.initialize() # tool.setup() @@ -301,9 +330,11 @@ def main(): # rms_no_fit_weighted_err = [] # for run in range(len(runlist)): -# # rms_mu_weighted.append(np.sum(rms_mu[run]*weights_mu_pix[run])/np.sum(weights_mu_pix[run])) +# # rms_mu_weighted.append(np.sum(rms_mu[run]*weights_mu_pix[run]) +# /np.sum(weights_mu_pix[run])) # # rms_mu_weighted_err.append(np.sqrt(1/np.sum(weights_mu_pix[run]))) -# rms_no_fit_weighted.append(np.nansum(rms_no_fit[run]*weights_no_fit_pix[run])/np.nansum(weights_no_fit_pix[run])) +# rms_no_fit_weighted.append(np.nansum(rms_no_fit[run]*weights_no_fit_pix[run]) +# /np.nansum(weights_no_fit_pix[run])) # rms_no_fit_weighted_err.append(np.sqrt(1/np.nansum(weights_no_fit_pix[run]))) @@ -325,7 +356,8 @@ def main(): # plt.axhline(1, ls='--', color='C4', alpha=0.6) -# plt.axhline(1/np.sqrt(12), ls='--', color='gray', alpha=0.7, label='Quantification rms noise') +# plt.axhline(1/np.sqrt(12), ls='--', color='gray', alpha=0.7, label= +# 'Quantification rms noise') # plt.axvspan(20, 1000, alpha=0.1, color='C4') diff --git a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/tools_components.py b/src/nectarchain/trr_test_suite/tools_components.py similarity index 83% rename from src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/tools_components.py rename to src/nectarchain/trr_test_suite/tools_components.py index a1a4c52e..5154c93d 100644 --- a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/tools_components.py +++ b/src/nectarchain/trr_test_suite/tools_components.py @@ -12,21 +12,25 @@ from ctapipe_io_nectarcam.containers import NectarCAMDataContainer from scipy.interpolate import InterpolatedUnivariateSpline from scipy.signal import find_peaks -from utils import adc_to_pe, argmedian from nectarchain.data.container import NectarCAMContainer from nectarchain.makers import EventsLoopNectarCAMCalibrationTool from nectarchain.makers.component import NectarCAMComponent +from nectarchain.trr_test_suite.utils import adc_to_pe, argmedian # overriding so we can have maxevents in the path def _init_output_path(self): - """ - Initializes the output path for the NectarCAMCalibrationTool. + """Initializes the output path for the NectarCAMCalibrationTool. - If `max_events` is `None`, the output file name will be in the format `{self.name}_run{self.run_number}.h5`. Otherwise, the file name will be in the format `{self.name}_run{self.run_number}_maxevents{self.max_events}.h5`. + If `max_events` is `None`, the output file name will be in the format\ + `{self.name}_run{self.run_number}.h5`. Otherwise, the file name will\ + be in the format\ + `{self.name}_run{self.run_number}_maxevents{self.max_events}.h5`. - The output path is constructed by joining the `NECTARCAMDATA` environment variable (or `/tmp` if not set) with the `tests` subdirectory and the generated file name. + The output path is constructed by joining the `NECTARCAMDATA` environment variable\ + (or `/tmp` if not set) with the `tests` subdirectory and the generated\ + file name. 
""" if self.max_events is None: @@ -42,8 +46,8 @@ def _init_output_path(self): class ChargeContainer(NectarCAMContainer): - """ - This class contains fields that store various properties and data related to NectarCAM events, including: + """This class contains fields that store various properties and data related to + NectarCAM events, including: - `run_number`: The run number associated with the waveforms. - `npixels`: The number of effective pixels. @@ -81,15 +85,22 @@ class ChargeContainer(NectarCAMContainer): class ChargeComp(NectarCAMComponent): - """ - This class `ChargeComp` is a NectarCAMComponent that processes NectarCAM event data. It extracts the charge information from the waveforms of each event, handling cases of saturated or noisy events. The class has the following configurable parameters: + """This class `ChargeComp` is a NectarCAMComponent that processes NectarCAM event + data. It extracts the charge information from the waveforms of each event, handling + cases of saturated or noisy events. The class has the following configurable + parameters: - `window_shift`: The time in ns before the peak to extract the charge. - `window_width`: The duration of the charge extraction window in ns. - The `__init__` method initializes important members of the component, such as timestamps, event type, event ids, pedestal and charge for both gain channels. - The `__call__` method is the main processing logic, which is called for each event. It extracts the charge information for both high gain and low gain channels, handling various cases such as saturated events and events with no signal. - The `finish` method collects all the processed data and returns a `ChargeContainer` object containing the run number, number of pixels, pixel IDs, UCTS timestamps, event types, event IDs, and the high and low gain charge values. + The `__init__` method initializes important members of the component, such as\ + timestamps, event type, event ids, pedestal and charge for both gain channels. + The `__call__` method is the main processing logic, which is called for each event.\ + It extracts the charge information for both high gain and low gain channels,\ + handling various cases such as saturated events and events with no signal. + The `finish` method collects all the processed data and returns a `ChargeContainer`\ + object containing the run number, number of pixels, pixel IDs, UCTS timestamps,\ + event types, event IDs, and the high and low gain charge values. """ window_shift = Integer( @@ -106,7 +117,8 @@ def __init__(self, subarray, config=None, parent=None, *args, **kwargs): super().__init__( subarray=subarray, config=config, parent=parent, *args, **kwargs ) - ## If you want you can add here members of MyComp, they will contain interesting quantity during the event loop process + # If you want you can add here members of MyComp, they will contain + # interesting quantity during the event loop process self.__ucts_timestamp = [] self.__event_type = [] @@ -118,7 +130,7 @@ def __init__(self, subarray, config=None, parent=None, *args, **kwargs): self.__charge_hg = [] self.__charge_lg = [] - ##This method need to be defined ! + # This method need to be defined ! 
def __call__(self, event: NectarCAMDataContainer, *args, **kwargs): self.__event_id.append(np.uint32(event.index.event_id)) @@ -131,7 +143,7 @@ def __call__(self, event: NectarCAMDataContainer, *args, **kwargs): wfs.append(event.r0.tel[0].waveform[constants.HIGH_GAIN][self.pixels_id]) wfs.append(event.r0.tel[0].waveform[constants.LOW_GAIN][self.pixels_id]) - #####THE JOB IS HERE###### + # ###THE JOB IS HERE#### for i, (pedestal, charge) in enumerate( zip( [self.__pedestal_hg, self.__pedestal_lg], @@ -181,9 +193,11 @@ def __call__(self, event: NectarCAMDataContainer, *args, **kwargs): integral[pix] = 0 else: - # x = np.linspace(0,signal_stop[pix]-signal_start[pix],signal_stop[pix]-signal_start[pix]) + # x = np.linspace(0,signal_stop[pix]-signal_start[pix], + # signal_stop[pix]-signal_start[pix]) # spl = UnivariateSpline(x,y) - # integral[pix] = spl.integral(0,signal_stop[pix]-signal_start[pix]) + # integral[pix] = spl.integral(0,signal_stop[pix]- + # signal_start[pix]) integral[pix] = np.sum( wf[pix, signal_start[pix] : signal_stop[pix]] @@ -193,7 +207,7 @@ def __call__(self, event: NectarCAMDataContainer, *args, **kwargs): charge.append(chg) - ##This method need to be defined ! + # This method need to be defined ! def finish(self): output = ChargeContainer( run_number=ChargeContainer.fields["run_number"].type(self._run_number), @@ -218,10 +232,16 @@ def finish(self): class LinearityTestTool(EventsLoopNectarCAMCalibrationTool): - """ - This class, `LinearityTestTool`, is a subclass of `EventsLoopNectarCAMCalibrationTool`. It is responsible for performing a linearity test on NectarCAM data. The class has a `componentsList` attribute that specifies the list of NectarCAM components to be applied. - - The `finish` method is the main functionality of this class. It reads the charge data from the output file, calculates the mean charge, standard deviation, and standard error for both the high gain and low gain channels, and returns these values. This information can be used to assess the linearity of the NectarCAM system. + """This class, `LinearityTestTool`, is a subclass of + `EventsLoopNectarCAMCalibrationTool`. It is responsible for performing a linearity + test on NectarCAM data. The class has a `componentsList` attribute that specifies + the list of NectarCAM components to be applied. + + The `finish` method is the main functionality of this class. It reads the charge\ + data from the output file, calculates the mean charge, standard deviation,\ + and standard error for both the high gain and low gain channels, and\ + returns these values. This information can be used to assess\ + the linearity of the NectarCAM system. """ name = "LinearityTestTool" @@ -255,7 +275,7 @@ def finish(self, *args, **kwargs): charge_hg.extend(tup[6]) charge_lg.extend(tup[7]) - except: + except Exception: break output_file.close() @@ -288,7 +308,8 @@ class ToMContainer(NectarCAMContainer): event_type (np.ndarray[np.uint8]): The trigger event types. event_id (np.ndarray[np.uint32]): The event IDs. charge_hg (np.ndarray[np.float64]): The mean high gain charge per event. - tom_no_fit (np.ndarray[np.float64]): The time of maximum from the data (no fitting). + tom_no_fit (np.ndarray[np.float64]): The time of maximum from\ + the data (no fitting). good_evts (np.ndarray[np.uint32]): The IDs of the good (non-cosmic ray) events. 
""" @@ -317,11 +338,13 @@ class ToMContainer(NectarCAMContainer): ) # tom_mu = Field( - # type=np.ndarray, dtype=np.float64, ndim=2, description="Time of maximum of signal fitted with gaussian" + # type=np.ndarray, dtype=np.float64, ndim=2, description="Time of maximum of + # signal fitted with gaussian" # ) # tom_sigma = Field( - # type=np.ndarray, dtype=np.float64, ndim=2, description="Time of fitted maximum sigma" + # type=np.ndarray, dtype=np.float64, ndim=2, description="Time of fitted + # maximum sigma" # ) tom_no_fit = Field( type=np.ndarray, @@ -338,14 +361,25 @@ class ToMContainer(NectarCAMContainer): class ToMComp(NectarCAMComponent): - """ - This class, `ToMComp`, is a component of the NectarCAM system that is responsible for processing waveform data. It has several configurable parameters, including the width and shift before the peak of the time window for charge extraction, the peak height threshold. - - The `__init__` method initializes some important component members, such as timestamps, event type, event ids, pedestal and charge values for both gain channels. - - The `__call__` method is the main entry point for processing an event. It extracts the waveform data, calculates the pedestal, charge, and time of maximum (ToM) for each pixel, and filters out events that do not meet the peak height threshold. The results are stored in various member variables, which are then returned in the `finish` method. - - The `finish` method collects the processed data from the member variables and returns a `ToMContainer` object, which contains the run number, number of pixels, pixel IDs, UCTS timestamps, event types, event IDs, high-gain charge, ToM without fitting, and IDs of good (non-cosmic ray) events. + """This class, `ToMComp`, is a component of the NectarCAM system that is responsible + for processing waveform data. It has several configurable parameters, including the + width and shift before the peak of the time window for charge extraction, the peak + height threshold. + + The `__init__` method initializes some important component members, such as\ + timestamps, event type, event ids, pedestal and charge values for both gain\ + channels. + + The `__call__` method is the main entry point for processing an event. It extracts\ + the waveform data, calculates the pedestal, charge, and time of maximum (ToM)\ + for each pixel, and filters out events that do not meet the peak\ + height threshold. The results are stored in various member variables,\ + which are then returned in the `finish` method. + + The `finish` method collects the processed data from the member variables and\ + returns a `ToMContainer` object, which contains the run number, number of\ + pixels, pixel IDs, UCTS timestamps, event types, event IDs, high-gain\ + charge, ToM without fitting, and IDs of good (non-cosmic ray) events. """ window_shift = Integer( @@ -367,7 +401,8 @@ def __init__(self, subarray, config=None, parent=None, *args, **kwargs): super().__init__( subarray=subarray, config=config, parent=parent, *args, **kwargs ) - ## If you want you can add here members of MyComp, they will contain interesting quantity during the event loop process + # If you want you can add here members of MyComp, they will contain + # interesting quantity during the event loop process self.__ucts_timestamp = [] self.__event_type = [] @@ -386,7 +421,7 @@ def __init__(self, subarray, config=None, parent=None, *args, **kwargs): self.__ff_event_ind = -1 - ##This method need to be defined ! + # This method need to be defined ! 
def __call__(self, event: NectarCAMDataContainer, *args, **kwargs): self.__event_id.append(np.uint32(event.index.event_id)) @@ -398,7 +433,7 @@ def __call__(self, event: NectarCAMDataContainer, *args, **kwargs): self.__ff_event_ind += 1 - #####THE JOB IS HERE###### + # #####THE JOB IS HERE###### for i, (pedestal, charge, tom_no_fit) in enumerate( zip([self.__pedestal_hg], [self.__charge_hg], [self.__tom_no_fit]) @@ -483,7 +518,8 @@ def __call__(self, event: NectarCAMDataContainer, *args, **kwargs): # # fit # model = Model(gaus) - # params = model.make_params(a=yi[peaks[max_peak_index]] * 3, mu=mean, sigma=sigma) + # params = model.make_params(a=yi[peaks[max_peak_index]] * 3, + # mu=mean, sigma=sigma) # result = model.fit(y_fit, params, x=x_fit) # result_sigma = result.params['sigma'].value @@ -510,12 +546,16 @@ def __call__(self, event: NectarCAMDataContainer, *args, **kwargs): # change_grad_pos_left = 3 # change_grad_pos_right = 3 # mean = xi[peaks[max_peak_index]] - # sigma = change_grad_pos_right + change_grad_pos_left # define window for the gaussian fit + # sigma = change_grad_pos_right + change_grad_pos_left # define + # window for the gaussian fit - # x_fit = xi[peaks[max_peak_index]-change_grad_pos_left:peaks[max_peak_index]+change_grad_pos_right] - # y_fit = yi[peaks[max_peak_index]-change_grad_pos_left:peaks[max_peak_index]+change_grad_pos_right] + # x_fit = xi[peaks[max_peak_index]-change_grad_pos_left:peaks + # [max_peak_index]+change_grad_pos_right] + # y_fit = yi[peaks[max_peak_index]-change_grad_pos_left:peaks + # [max_peak_index]+change_grad_pos_right] # model = Model(gaus) - # params = model.make_params(a=yi[peaks[max_peak_index]],mu=mean,sigma=sigma) + # params = model.make_params(a=yi[peaks[max_peak_index]], + # mu=mean,sigma=sigma) # result = model.fit(y_fit, params, x=x_fit) max_position_x_prefit = xi[peaks[max_peak_index]] @@ -523,7 +563,8 @@ def __call__(self, event: NectarCAMDataContainer, *args, **kwargs): # result_mu = result.params['mu'].value else: - # index_x_window_min = list(xi).index(closest_value(xi, signal_start[pix])) + # index_x_window_min = list(xi).index(closest_value(xi, + # signal_start[pix])) charge_sum = y[ signal_start[pix] : signal_start[pix] + self.window_width ].sum() @@ -533,10 +574,12 @@ def __call__(self, event: NectarCAMDataContainer, *args, **kwargs): # result_mu = -1 else: - # If no maximum is found, the integration is done between 20 and 36 ns. + # If no maximum is found, the integration is done between 20 and 36 + # ns. signal_start[pix] = 20 - # index_x_window_min = list(xi).index(closest_value(xi, signal_start[pix])) + # index_x_window_min = list(xi).index(closest_value(xi, + # signal_start[pix])) charge_sum = y[ signal_start[pix] : signal_start[pix] + self.window_width ].sum() @@ -562,7 +605,7 @@ def __call__(self, event: NectarCAMDataContainer, *args, **kwargs): # print("is good evt") self.__good_evts.append(self.__ff_event_ind) - ##This method need to be defined ! + # This method need to be defined ! def finish(self): output = ToMContainer( run_number=ToMContainer.fields["run_number"].type(self._run_number), @@ -587,12 +630,20 @@ def finish(self): class TimingResolutionTestTool(EventsLoopNectarCAMCalibrationTool): - """ - This class, `TimingResolutionTestTool`, is a subclass of `EventsLoopNectarCAMCalibrationTool` and is used to perform timing resolution tests on NectarCAM data. 
It reads the output data from the `ToMContainer` dataset and processes the charge, timing, and event information to calculate the timing resolution and mean charge in photoelectrons. - - The `finish()` method is the main entry point for this tool. It reads the output data from the HDF5 file, filters the data to remove cosmic ray events, and then calculates the timing resolution and mean charge per photoelectron. The timing resolution is calculated using a weighted mean and variance approach, with an option to use a bootstrapping method to estimate the error on the RMS value. - - The method returns the RMS of the timing resolution, the error on the RMS, and the mean charge in photoelectrons. + """This class, `TimingResolutionTestTool`, is a subclass of + `EventsLoopNectarCAMCalibrationTool` and is used to perform timing resolution tests + on NectarCAM data. It reads the output data from the `ToMContainer` dataset and + processes the charge, timing, and event information to calculate the timing + resolution and mean charge in photoelectrons. + + The `finish()` method is the main entry point for this tool. It reads the output + data from the HDF5 file, filters the data to remove cosmic ray events, and then + calculates the timing resolution and mean charge per photoelectron. The timing + resolution is calculated using a weighted mean and variance approach, with an option + to use a bootstrapping method to estimate the error on the RMS value. + + The method returns the RMS of the timing resolution, the error on the RMS, and the + mean charge in photoelectrons. """ name = "TimingResolutionTestTool" @@ -625,7 +676,7 @@ def finish(self, bootstrap=False, *args, **kwargs): charge_all.extend(tup[6]) tom_no_fit_all.extend(tup[7]) good_evts.extend(tup[8]) - except: + except Exception: break output_file.close() @@ -640,9 +691,11 @@ def finish(self, bootstrap=False, *args, **kwargs): # print(good_evts) charge = charge_all[good_evts] mean_charge_pe = np.mean(np.mean(charge, axis=0)) / 58.0 - # tom_mu = np.array(tom_mu_all[good_evts]).reshape(len(good_evts),output[0].npixels) + # tom_mu = np.array(tom_mu_all[good_evts]).reshape(len(good_evts), + # output[0].npixels) - # tom_sigma = np.array(tom_sigma_all[good_evts]).reshape(len(good_evts),output[0].npixels) + # tom_sigma = np.array(tom_sigma_all[good_evts]).reshape(len(good_evts), + # output[0].npixels) tom_no_fit = np.array(tom_no_fit_all[good_evts]).reshape( len(good_evts), npixels ) @@ -679,7 +732,8 @@ def finish(self, bootstrap=False, *args, **kwargs): bootsample = np.random.choice( sample, size=int(3 / 4 * (len(sample))), replace=True ) - # print(len(bootsample), bootsample.mean(), bootsample.std()) + # print(len(bootsample), bootsample.mean(), + # bootsample.std()) boot_rms.append(bootsample.std()) # simulated mean of rms bootrms_mean = np.mean(boot_rms) @@ -707,14 +761,15 @@ def finish(self, bootstrap=False, *args, **kwargs): rms[pix] = np.sqrt(weighted_variance) # print("RMS:", rms[pix]) - # Compute the total number of data points (sum of histogram values, i.e. N) + # Compute the total number of data points (sum of histogram + # values, i.e. 
N) N = np.sum(hist_values) # print("Total number of events (N):", N) # Error on the standard deviation err[pix] = rms[pix] / np.sqrt(2 * N) # print("Error on RMS:", err[pix]) - except: + except Exception: # no data rms[pix] = np.nan err[pix] = np.nan @@ -723,17 +778,22 @@ def finish(self, bootstrap=False, *args, **kwargs): class ToMPairsTool(EventsLoopNectarCAMCalibrationTool): - """ - This class, `ToMPairsTool`, is an `EventsLoopNectarCAMCalibrationTool` that is used to process ToM (Time of maximum) data from NectarCAM. + """This class, `ToMPairsTool`, is an `EventsLoopNectarCAMCalibrationTool`\ + that is used to process ToM (Time of maximum) data from NectarCAM. The `finish` method has the following functionalities: - - It reads in ToM data from an HDF5 file and applies a transit time correction to the ToM values using a provided lookup table. - - It calculates the time difference between ToM pairs for both corrected and uncorrected ToM values. - - It returns the uncorrected ToM values, the corrected ToM values, the pixel IDs, and the time difference calculations for the uncorrected and corrected ToM values. - - The class has several configurable parameters, including the list of NectarCAM components to apply, the maximum number of events to process, and the output file path. - + - It reads in ToM data from an HDF5 file and applies a transit time correction to\ + the ToM values using a provided lookup table. + - It calculates the time difference between ToM pairs for both corrected and\ + uncorrected ToM values. + - It returns the uncorrected ToM values, the corrected ToM values, the pixel IDs,\ + and the time difference calculations for the uncorrected and corrected\ + ToM values. + + The class has several configurable parameters, including the list of NectarCAM\ + components to apply, the maximum number of events to process, and the output\ + file path. """ name = "ToMPairsTool" @@ -765,7 +825,7 @@ def finish(self, *args, **kwargs): try: pixels_id.extend(tup[2]) tom_no_fit_all.extend(tup[7]) - except: + except Exception: break output_file.close() @@ -851,8 +911,8 @@ def finish(self, *args, **kwargs): class PedestalContainer(NectarCAMContainer): - """ - Attributes of the PedestalContainer class that store various data related to the pedestal of a NectarCAM event. + """Attributes of the PedestalContainer class that store various data related to the + pedestal of a NectarCAM event. Attributes: run_number (np.uint16): The run number associated with the waveforms. @@ -914,21 +974,30 @@ class PedestalContainer(NectarCAMContainer): class PedestalComp(NectarCAMComponent): - """ - The `PedestalComp` class is a NectarCAMComponent that is responsible for processing the pedestal and RMS of the high and low gain waveforms for each event. - - The `__init__` method initializes the `PedestalComp` class. It sets up several member variables to store pedestal related data such as timestamps, event types, event IDs, pedestal and pedestal rms values for both gains. - - The `__call__` method is called for each event, and it processes the waveforms to calculate the pedestal and RMS for the high and low gain channels. The results are stored in the class attributes `__pedestal_hg`, `__pedestal_lg`, `__rms_ped_hg`, and `__rms_ped_lg`. - - The `finish` method is called at the end of processing, and it returns a `PedestalContainer` object containing the calculated pedestal and RMS values, as well as other event information. 
+ """The `PedestalComp` class is a NectarCAMComponent that is responsible for + processing the pedestal and RMS of the high and low gain waveforms for each event. + + The `__init__` method initializes the `PedestalComp` class. It sets up several\ + member variables to store pedestal related data such as timestamps,\ + event types,\ + event IDs, pedestal and pedestal rms values for both gains. + + The `__call__` method is called for each event, and it processes the waveforms to\ + calculate the pedestal and RMS for the high and low gain channels. The results\ + are stored in the class attributes `__pedestal_hg`, `__pedestal_lg`, \ + `__rms_ped_hg`, and `__rms_ped_lg`. + + The `finish` method is called at the end of processing, and it returns a\ + `PedestalContainer` object containing the calculated pedestal and RMS values\ + , as well as other event information. """ def __init__(self, subarray, config=None, parent=None, *args, **kwargs): super().__init__( subarray=subarray, config=config, parent=parent, *args, **kwargs ) - ## If you want you can add here members of MyComp, they will contain interesting quantity during the event loop process + # If you want you can add here members of MyComp, they will contain interesting + # quantity during the event loop process self.__ucts_timestamp = [] self.__event_type = [] @@ -941,7 +1010,7 @@ def __init__(self, subarray, config=None, parent=None, *args, **kwargs): self.__rms_ped_hg = [] self.__rms_ped_lg = [] - ##This method need to be defined ! + # This method need to be defined ! def __call__(self, event: NectarCAMDataContainer, *args, **kwargs): self.__event_id.append(np.uint32(event.index.event_id)) @@ -954,7 +1023,7 @@ def __call__(self, event: NectarCAMDataContainer, *args, **kwargs): wfs.append(event.r0.tel[0].waveform[constants.HIGH_GAIN][self.pixels_id]) wfs.append(event.r0.tel[0].waveform[constants.LOW_GAIN][self.pixels_id]) - #####THE JOB IS HERE###### + # #####THE JOB IS HERE###### for i, (pedestal, rms_pedestal) in enumerate( zip( @@ -972,7 +1041,7 @@ def __call__(self, event: NectarCAMDataContainer, *args, **kwargs): pedestal.append(ped) rms_pedestal.append(rms_ped) - ##This method need to be defined ! + # This method need to be defined ! def finish(self): output = PedestalContainer( run_number=PedestalContainer.fields["run_number"].type(self._run_number), @@ -1002,12 +1071,16 @@ def finish(self): class PedestalTool(EventsLoopNectarCAMCalibrationTool): - """ - This class is a part of the PedestalTool, which is an EventsLoopNectarCAMCalibrationTool. + """This class is a part of the PedestalTool, which is an + EventsLoopNectarCAMCalibrationTool. - The finish() method opens the output file, which is an HDF5 file, and extracts the `rms_ped_hg` (root mean square of the high gain pedestal) values from the `PedestalContainer` dataset. Finally, it closes the output file and returns the list of `rms_ped_hg` values. + The finish() method opens the output file, which is an HDF5 file,\ + and extracts the `rms_ped_hg` (root mean square of the high gain pedestal)\ + values from the `PedestalContainer` dataset. Finally, it closes the output\ + file and returns the list of `rms_ped_hg` values. - This method is used to post-process the output of the PedestalTool and extract specific information from the generated HDF5 file. + This method is used to post-process the output of the PedestalTool and extract\ + specific information from the generated HDF5 file. 
""" name = "PedestalTool" @@ -1042,7 +1115,7 @@ def finish(self, *args, **kwargs): for tup in data: try: rms_ped_hg.extend(tup[8]) - except: + except Exception: break output_file.close() @@ -1051,8 +1124,8 @@ def finish(self, *args, **kwargs): class UCTSContainer(NectarCAMContainer): - """ - Defines the fields for the UCTSContainer class, which is used to store various data related to UCTS events. + """Defines the fields for the UCTSContainer class, which is used to store various + data related to UCTS events. The fields include: - `run_number`: The run number associated with the waveforms. @@ -1096,12 +1169,15 @@ class UCTSContainer(NectarCAMContainer): class UCTSComp(NectarCAMComponent): - """ - The `__init__` method initializes the `UCTSComp` class, which is a NectarCAMComponent. It sets up several member variables to store UCTS related data, such as timestamps, event types, event IDs, busy counters, and event counters. + """The `__init__` method initializes the `UCTSComp` class, which is a + NectarCAMComponent. It sets up several member variables to store UCTS related data, + such as timestamps, event types, event IDs, busy counters, and event counters. - The `__call__` method is called for each event, and it appends the UCTS-related data from the event to the corresponding member variables. + The `__call__` method is called for each event, and it appends the UCTS-related\ + data from the event to the corresponding member variables. - The `finish` method creates and returns a `UCTSContainer` object, which is a container for the UCTS-related data that was collected during the event loop. + The `finish` method creates and returns a `UCTSContainer` object, which is a\ + container for the UCTS-related data that was collected during the event loop. """ window_shift = Integer( @@ -1120,7 +1196,8 @@ def __init__( super().__init__( subarray=subarray, config=config, parent=parent, *args, **kwargs ) - ## If you want you can add here members of MyComp, they will contain interesting quantity during the event loop process + # If you want you can add here members of MyComp, they will contain interesting + # quantity during the event loop process self.__ucts_timestamp = [] self.__event_type = [] @@ -1130,7 +1207,7 @@ def __init__( self.excl_muons = None self.__mean_event_charge = [] - ##This method need to be defined ! + # This method need to be defined ! def __call__(self, event: NectarCAMDataContainer, *args, **kwargs): take_event = True @@ -1193,7 +1270,7 @@ def __call__(self, event: NectarCAMDataContainer, *args, **kwargs): if self.excl_muons: self.__mean_event_charge.append(mean_charge) - ##This method need to be defined ! + # This method need to be defined ! def finish(self): output = UCTSContainer( run_number=UCTSContainer.fields["run_number"].type(self._run_number), @@ -1218,10 +1295,16 @@ def finish(self): class DeadtimeTestTool(EventsLoopNectarCAMCalibrationTool): - """ - The `DeadtimeTestTool` class is an `EventsLoopNectarCAMCalibrationTool` that is used to test the deadtime of NectarCAM. - - The `finish` method is responsible for reading the data from the HDF5 file, extracting the relevant information (UCTS timestamps, event counters, and busy counters), and calculating the deadtime-related metrics. The method returns the UCTS timestamps, the time differences between consecutive UCTS timestamps, the event counters, the busy counters, the collected trigger rate, the total time, and the deadtime percentage. 
+ """The `DeadtimeTestTool` class is an `EventsLoopNectarCAMCalibrationTool` that is + used to test the deadtime of NectarCAM. + + The `finish` method is responsible for reading the data from the HDF5 file,\ + extracting the relevant information (UCTS timestamps, event counters, and\ + busy counters), and calculating the deadtime-related metrics. The method\ + returns the UCTS timestamps, the time differences between consecutive\ + UCTS timestamps, the event counters, the busy counters,\ + the collected\ + trigger rate, the total time, and the deadtime percentage. """ name = "DeadtimeTestTool" @@ -1250,7 +1333,7 @@ def finish(self, *args, **kwargs): ucts_timestamps.extend(tup[3]) event_counter.extend(tup[7]) busy_counter.extend(tup[6]) - except: + except Exception: break # print(output_file.keys()) # tom_mu_all= output[0].tom_mu @@ -1285,10 +1368,15 @@ def finish(self, *args, **kwargs): class TriggerTimingTestTool(EventsLoopNectarCAMCalibrationTool): - """ - The `TriggerTimingTestTool` class is an `EventsLoopNectarCAMCalibrationTool` that is used to test the trigger timing of NectarCAM. - - The `finish` method is responsible for reading the data from the HDF5 file, extracting the relevant information (UCTS timestamps), and calculating the RMS value of the difference between consecutive triggers. The method returns the UCTS timestamps, the time differences between consecutive triggers for events concerning more than 10 pixels (non-muon related events). + """The `TriggerTimingTestTool` class is an `EventsLoopNectarCAMCalibrationTool` that + is used to test the trigger timing of NectarCAM. + + The `finish` method is responsible for reading the data from the HDF5 file,\ + extracting the relevant information (UCTS timestamps), and calculating\ + the RMS value of the difference between consecutive triggers. The method\ + returns the UCTS timestamps, the time differences between consecutive\ + triggers for events concerning more than 10 pixels (non-muon\ + related events). """ name = "TriggerTimingTestTool" @@ -1323,7 +1411,7 @@ def finish(self, *args, **kwargs): ucts_timestamps.extend(tup[3]) charge_per_event.extend(tup[4]) - except: + except Exception: break # print(output_file.keys()) # tom_mu_all= output[0].tom_mu diff --git a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/trigger_timing.py b/src/nectarchain/trr_test_suite/trigger_timing.py similarity index 79% rename from src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/trigger_timing.py rename to src/nectarchain/trr_test_suite/trigger_timing.py index 6dcc8f7d..933cf03e 100644 --- a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/trigger_timing.py +++ b/src/nectarchain/trr_test_suite/trigger_timing.py @@ -7,23 +7,29 @@ import matplotlib.pyplot as plt import numpy as np -from tools_components import TriggerTimingTestTool -from utils import pe2photons + +from nectarchain.trr_test_suite.tools_components import TriggerTimingTestTool +from nectarchain.trr_test_suite.utils import pe2photons def get_args(): - """ - Parses command-line arguments for the deadtime test script. + """Parses command-line arguments for the deadtime test script. Returns: argparse.ArgumentParser: The parsed command-line arguments. """ parser = argparse.ArgumentParser( description="Trigger Timing Test B-TEL-1410. 
\n" - + "According to the nectarchain component interface, you have to set a NECTARCAMDATA environment variable in the folder where you have the data from your runs or where you want them to be downloaded.\n" - + "You have to give a list of runs (run numbers with spaces inbetween) and an output directory to save the final plot.\n" - + "If the data is not in NECTARCAMDATA, the files will be downloaded through DIRAC.\n For the purposes of testing this script, default data is from the runs used for this test in the TRR document.\n" - + "You can optionally specify the number of events to be processed (default 1000).\n" + + "According to the nectarchain component interface, you have to set\ + a NECTARCAMDATA environment variable in the folder where you have the data\ + from your runs or where you want them to be downloaded.\n" + + "You have to give a list of runs (run numbers with spaces inbetween) and an\ + output directory to save the final plot.\n" + + "If the data is not in NECTARCAMDATA, the files will be downloaded through\ + DIRAC.\n For the purposes of testing this script, default data is from the\ + runs used for this test in the TRR document.\n" + + "You can optionally specify the number of events to be processed\ + (default 1000).\n" ) parser.add_argument( "-r", @@ -58,15 +64,21 @@ def get_args(): def main(): - """ - Runs the deadtime test script, which performs deadtime tests B-TEL-1260 and B-TEL-1270. + """Runs the deadtime test script, which performs deadtime tests B-TEL-1260 and + B-TEL-1270. - The script takes command-line arguments to specify the list of runs, corresponding sources, number of events to process, and output directory. It then processes the data for each run, performs an exponential fit to the deadtime distribution, and generates two plots: + The script takes command-line arguments to specify the list of runs, corresponding\ + sources, number of events to process, and output directory. It then processes\ + the data for each run, performs an exponential fit to the deadtime\ + distribution, and generates two plots: - 1. A plot of deadtime percentage vs. collected trigger rate, with the CTA requirement indicated. - 2. A plot of the rate from the fit vs. the collected trigger rate, with the relative difference shown in the bottom panel. + 1. A plot of deadtime percentage vs. collected trigger rate, with the CTA\ + requirement indicated. + 2. A plot of the rate from the fit vs. the collected trigger rate, with the\ + relative difference shown in the bottom panel. - The script also saves the generated plots to the specified output directory, and optionally saves them to a temporary output directory for use in a GUI. + The script also saves the generated plots to the specified output directory,\ + and optionally saves them to a temporary output directory for use in a GUI. 
""" parser = get_args() @@ -141,7 +153,8 @@ def main(): cta_requirement_y = 5 # Y-value for the CTA requirement ax.axhline(y=cta_requirement_y, color="purple", linestyle="--") - # Add the small vertical arrows starting from the CTA requirement line and pointing downwards + # Add the small vertical arrows starting from the CTA requirement line and pointing + # downwards arrow_positions = [20, 80, 200] # X-positions for the arrows for x_pos in arrow_positions: ax.annotate( @@ -151,7 +164,8 @@ def main(): arrowprops=dict(arrowstyle="->", color="purple", lw=1.5), ) # Arrow pointing downwards - # Add the CTA requirement label exactly above the dashed line, centered between arrows + # Add the CTA requirement label exactly above the dashed line, centered between + # arrows ax.text( 140, cta_requirement_y + 0.5, diff --git a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/utils.py b/src/nectarchain/trr_test_suite/utils.py similarity index 88% rename from src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/utils.py rename to src/nectarchain/trr_test_suite/utils.py index 27f8b202..b8fec4c9 100644 --- a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/utils.py +++ b/src/nectarchain/trr_test_suite/utils.py @@ -241,13 +241,15 @@ def pe_from_intensity_percentage( percent_from_calibration=intensity_percent, known_charge=intensity_to_charge, ): - """ - Converts a percentage of intensity to the corresponding charge value based on a known calibration. + """Converts a percentage of intensity to the corresponding charge value based on a + known calibration. Args: percent (numpy.ndarray): The percentage of intensity to convert to charge. - percent_from_calibration (numpy.ndarray, optional): The known percentages of intensity used in the calibration. Defaults to `intensity_percent`. - known_charge (numpy.ndarray, optional): The known charge values corresponding to the calibration percentages. Defaults to `intensity_to_charge`. + percent_from_calibration (numpy.ndarray, optional): The known percentages of\ + intensity used in the calibration. Defaults to `intensity_percent`. + known_charge (numpy.ndarray, optional): The known charge values corresponding\ + to the calibration percentages. Defaults to `intensity_to_charge`. Returns: numpy.ndarray: The charge values corresponding to the input percentages. @@ -265,8 +267,7 @@ def pe_from_intensity_percentage( # functions by federica def linear_fit_function(x, a, b): - """ - Computes a linear function of the form `a*x + b`. + """Computes a linear function of the form `a*x + b`. Args: x (float): The input value. @@ -280,8 +281,7 @@ def linear_fit_function(x, a, b): def second_degree_fit_function(x, a, b, c): - """ - Computes a quadratic function of the form `a*(x**2) + b*x + c`. + """Computes a quadratic function of the form `a*(x**2) + b*x + c`. Args: x (float): The input value. @@ -296,8 +296,7 @@ def second_degree_fit_function(x, a, b, c): def third_degree_fit_function(x, a, b, c, d): - """ - Computes a function of the form `(a*x + b)/(1+c) + d`. + """Computes a function of the form `(a*x + b)/(1+c) + d`. Args: x (float): The input value. @@ -313,8 +312,7 @@ def third_degree_fit_function(x, a, b, c, d): def fit_function_hv(x, a, b): - """ - Computes a function of the form `a/sqrt(x) + b`. + """Computes a function of the form `a/sqrt(x) + b`. Args: x (float): The input value. 
@@ -328,15 +326,16 @@ def fit_function_hv(x, a, b): def err_ratio(nominator, denominator, err_norm, err_denom, cov_nom_den=0): - """ - Computes the error ratio for a given nominator, denominator, and their respective errors. + """Computes the error ratio for a given nominator, denominator, and their respective + errors. Args: nominator (float): The nominator value. denominator (float): The denominator value. err_norm (float): The error of the nominator. err_denom (float): The error of the denominator. - cov_nom_den (float, optional): The covariance between the nominator and denominator. Defaults to 0. + cov_nom_den (float, optional): The covariance between the nominator and\ + denominator. Defaults to 0. Returns: float: The error ratio. @@ -351,10 +350,11 @@ def err_ratio(nominator, denominator, err_norm, err_denom, cov_nom_den=0): def err_sum(err_a, err_b, cov_a_b=0): - """ - Computes the square root of the sum of the squares of `err_a` and `err_b`, plus twice the covariance `cov_a_b`. + """Computes the square root of the sum of the squares of `err_a` and `err_b`, plus + twice the covariance `cov_a_b`. - This function is used to calculate the combined error of two values, taking into account their individual errors and the covariance between them. + This function is used to calculate the combined error of two values, taking into\ + account their individual errors and the covariance between them. Args: err_a (float): The error of the first value. @@ -369,18 +369,22 @@ def err_sum(err_a, err_b, cov_a_b=0): # from stackoverflow def argmedian(x, axis=None): - """ - Returns the index of the median element in the input array `x` along the specified axis. + """Returns the index of the median element in the input array `x` along the + specified axis. - If `axis` is `None`, the function returns the index of the median element in the flattened array. - Otherwise, it computes the argmedian along the specified axis and returns an array of indices. + If `axis` is `None`, the function returns the index of the median element in\ + the flattened array. + Otherwise, it computes the argmedian along the specified axis and returns an\ + array of indices. Args: x (numpy.ndarray): The input array. - axis (int or None, optional): The axis along which to compute the argmedian. If `None`, the argmedian is computed on the flattened array. + axis (int or None, optional): The axis along which to compute the argmedian.\ + If `None`, the argmedian is computed on the flattened array. Returns: - int or numpy.ndarray: The index or indices of the median element(s) in the input array. + int or numpy.ndarray: The index or indices of the median element(s) in the\ + input array. """ if axis is None: return np.argpartition(x, len(x) // 2)[len(x) // 2] @@ -392,8 +396,8 @@ def argmedian(x, axis=None): def pe2photons(x): - """ - Converts the input value `x` from photons to photoelectrons (PE) by multiplying it by 4. + """Converts the input value `x` from photons to photoelectrons (PE) by multiplying + it by 4. Args: x (float): The input value in photons. @@ -405,8 +409,8 @@ def pe2photons(x): def photons2pe(x): - """ - Converts the input value `x` from photoelectrons (PE) to photons by dividing it by 4. + """Converts the input value `x` from photoelectrons (PE) to photons by dividing it + by 4. Args: x (float): The input value in photoelectrons. 
@@ -419,8 +423,8 @@ def photons2pe(x): # from federica's notebook class ExponentialFitter: - """ - Represents an exponential fitter class that computes the expected distribution and the minus 2 log likelihood for a given dataset and exponential parameters. + """Represents an exponential fitter class that computes the expected distribution + and the minus 2 log likelihood for a given dataset and exponential parameters. Attributes: datas (numpy.ndarray): The input data array. @@ -428,7 +432,8 @@ class ExponentialFitter: Methods: compute_expected_distribution(norm, loc, scale): - Computes the expected distribution given the normalization, location, and scale parameters. + Computes the expected distribution given the normalization, location, and\ + scale parameters. expected_distribution(x): Returns the expected distribution given the parameters in `x`. compute_minus2loglike(x): @@ -466,8 +471,7 @@ def compute_minus2loglike(self, x): def pois(x, A, R): - """ - Computes the expected distribution for a Poisson process with rate parameter `R`. + """Computes the expected distribution for a Poisson process with rate parameter `R`. Args: x (float): The input value. @@ -477,13 +481,12 @@ def pois(x, A, R): Returns: float: The expected distribution for the Poisson process. """ - """poisson function, parameter r (rate) is the fit parameter""" + """Poisson function, parameter r (rate) is the fit parameter.""" return A * np.exp(x * R) def deadtime_and_expo_fit(time_tot, deadtime_us, run, output_plot=None): - """ - Computes the deadtime and exponential fit parameters for a given dataset. + """Computes the deadtime and exponential fit parameters for a given dataset. Args: time_tot (float): The total time of the dataset. @@ -579,8 +582,9 @@ def deadtime_and_expo_fit(time_tot, deadtime_us, run, output_plot=None): ax.text( 600, entries[1] / 1, - "$y = A \cdot \exp({-R \cdot x})$\n" - # + r'$A=%2.2f \pm %2.2f$'%(as_si((result.params['A'].value/1000)*1e3,2), as_si((result.params['A'].stderr/1000)*1e3,2)) + r"$y = A \cdot \exp({-R \cdot x})$\n" + # + r'$A=%2.2f \pm %2.2f$'%(as_si((result.params['A'].value/1000) + # *1e3,2), as_si((result.params['A'].stderr/1000)*1e3,2)) + r"$A=%2.2f \pm %2.2f$" % (result.params["A"].value, result.params["A"].stderr) + "\n" diff --git a/src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/__init__.py b/src/nectarchain/user_scripts/dmousadi/TRR_scripts/src/__init__.py deleted file mode 100644 index e69de29b..00000000
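A minimal, hypothetical usage sketch (not part of the patch): it illustrates how the relocated TRR test-suite modules are imported after this reorganisation. The `nectarchain.trr_test_suite.tools_components` and `nectarchain.trr_test_suite.utils` paths are taken from the import statements and file renames in this diff; everything else (an installed package, the particular tools picked out) is an assumption for illustration only.

# Hypothetical sketch: import the relocated tools from the new package layout.
# Assumes nectarchain is installed with the trr_test_suite package from this diff.
from nectarchain.trr_test_suite.tools_components import (
    DeadtimeTestTool,       # deadtime metrics from UCTS timestamps and busy counters
    TriggerTimingTestTool,  # RMS of time differences between consecutive triggers
)
from nectarchain.trr_test_suite.utils import pe2photons  # photon/p.e. helper used by trigger_timing.py

# The old flat imports from the user_scripts location, e.g.
#   from tools_components import TriggerTimingTestTool
# are replaced by the package-qualified imports above.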