Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Trr tests implementation in the core nectarchain module #173

Open
wants to merge 15 commits into
base: main
Choose a base branch
from
1 change: 1 addition & 0 deletions docs/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@
# class signature)
"numpydoc",
"sphinx_design",
"sphinx_qt_documentation",
]

autosummary_generate = True # Turn on sphinx.ext.autosummary
Expand Down
3 changes: 3 additions & 0 deletions environment.yml
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@ dependencies:
- sphinx
- sphinx-automodapi
- pydata-sphinx-theme
- lmfit # needed for TRR
- h5py # needed for TRR (should be removed to use I/O methods of containers)
- pyqt # [linux]
- pip:
- zeo
Expand All @@ -25,3 +27,4 @@ dependencies:
- browser-cookie3
- pyqtgraph
- pyqt6 # [osx and arm64]
- sphinx-qt-documentation
6 changes: 5 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,10 @@ dependencies = [
"scipy==1.11.4",
"zodb",
"zeo",
"pyqt6",
"lmfit",
"h5py",
'pyqt5 ; platform_system == "Linux"',
'pyqt6 ; platform_system != "Linux"', # for macOS
"pyqtgraph",
]

Expand All @@ -52,6 +55,7 @@ docs = [
"sphinx-autodoc-typehints",
"sphinx-automodapi",
"sphinx-design",
"sphinx-qt-documentation",
"pydata_sphinx_theme",
"numpydoc",
"tomli; python_version < '3.11'"
Expand Down
5 changes: 5 additions & 0 deletions src/nectarchain/trr_test_suite/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
from .gui import TestRunner

Check warning on line 1 in src/nectarchain/trr_test_suite/__init__.py

View check run for this annotation

Codecov / codecov/patch

src/nectarchain/trr_test_suite/__init__.py#L1

Added line #L1 was not covered by tests

__all__ = [

Check warning on line 3 in src/nectarchain/trr_test_suite/__init__.py

View check run for this annotation

Codecov / codecov/patch

src/nectarchain/trr_test_suite/__init__.py#L3

Added line #L3 was not covered by tests
"TestRunner",
]
Original file line number Diff line number Diff line change
Expand Up @@ -9,23 +9,33 @@
import numpy as np
from astropy import units as u
from iminuit import Minuit
from tools_components import DeadtimeTestTool
from utils import ExponentialFitter, deadtime_labels, source_ids_deadtime

from nectarchain.trr_test_suite.tools_components import DeadtimeTestTool
from nectarchain.trr_test_suite.utils import (

Check warning on line 14 in src/nectarchain/trr_test_suite/deadtime.py

View check run for this annotation

Codecov / codecov/patch

src/nectarchain/trr_test_suite/deadtime.py#L13-L14

Added lines #L13 - L14 were not covered by tests
ExponentialFitter,
deadtime_labels,
source_ids_deadtime,
)


def get_args():
"""
Parses command-line arguments for the deadtime test script.
"""Parses command-line arguments for the deadtime test script.

Returns:
argparse.ArgumentParser: The parsed command-line arguments.
"""
parser = argparse.ArgumentParser(
description="Deadtime tests B-TEL-1260 & B-TEL-1270. \n"
+ "According to the nectarchain component interface, you have to set a NECTARCAMDATA environment variable in the folder where you have the data from your runs or where you want them to be downloaded.\n"
+ "You have to give a list of runs (run numbers with spaces inbetween), a corresponding source list and an output directory to save the final plot.\n"
+ "If the data is not in NECTARCAMDATA, the files will be downloaded through DIRAC.\n For the purposes of testing this script, default data is from the runs used for this test in the TRR document.\n"
+ "You can optionally specify the number of events to be processed (default 1000).\n"
+ "According to the nectarchain component interface, you have to set a\
NECTARCAMDATA environment variable in the folder where you have the data\
from your runs or where you want them to be downloaded.\n"
+ "You have to give a list of runs (run numbers with spaces in between), a \
corresponding source list and an output directory to save the final plot.\n"
+ "If the data is not in NECTARCAMDATA, the files will be downloaded through \
DIRAC.\n For the purposes of testing this script, default data is from the\
runs used for this test in the TRR document.\n"
+ "You can optionally specify the number of events to be processed \
(default 1000).\n"
)
parser.add_argument(
"-r",
Expand All @@ -42,7 +52,8 @@
type=int,
choices=[0, 1, 2],
nargs="+",
help="List of corresponding source for each run: 0 for random generator, 1 for nsb source, 2 for laser",
help="List of corresponding source for each run: 0 for random generator,\
1 for nsb source, 2 for laser",
required=False,
default=source_ids_deadtime,
)
Expand Down Expand Up @@ -70,15 +81,21 @@


def main():
"""
Runs the deadtime test script, which performs deadtime tests B-TEL-1260 and B-TEL-1270.
"""Runs the deadtime test script, which performs deadtime tests B-TEL-1260 and
B-TEL-1270.

The script takes command-line arguments to specify the list of runs, corresponding sources, number of events to process, and output directory. It then processes the data for each run, performs an exponential fit to the deadtime distribution, and generates two plots:
The script takes command-line arguments to specify the list of runs, corresponding\
sources, number of events to process, and output directory. It then processes\
the data for each run, performs an exponential fit to the deadtime\
distribution, and generates two plots:

1. A plot of deadtime percentage vs. collected trigger rate, with the CTA requirement indicated.
2. A plot of the rate from the fit vs. the collected trigger rate, with the relative difference shown in the bottom panel.
1. A plot of deadtime percentage vs. collected trigger rate, with the CTA\
requirement indicated.
2. A plot of the rate from the fit vs. the collected trigger rate, with the\
relative difference shown in the bottom panel.

The script also saves the generated plots to the specified output directory, and optionally saves them to a temporary output directory for use in a GUI.
The script also saves the generated plots to the specified output directory, and\
optionally saves them to a temporary output directory for use in a GUI.
"""

parser = get_args()
Expand Down Expand Up @@ -189,7 +206,8 @@
m.limits["deadtime"] = (
0.6e-6,
1.1e-6,
) # Put some tigh constrain as the fit will be in trouble when it expect 0. and measured something instead.
) # Put some tight constraint as the fit will be in trouble when it expects
# 0. and measures something instead.

m.print_level = 2

Expand All @@ -202,16 +220,19 @@
# print(fitted_params_err)

print(
f"Dead-Time is {1.e6*fitted_params[1]:.3f} +- {1.e6*fitted_params_err[1]:.3f} µs"
f"Dead-Time is {1.e6*fitted_params[1]:.3f} +- "
f"{1.e6*fitted_params_err[1]:.3f} µs"
)
print(
f"Rate is {1./fitted_params[2]:.2f} +- {fitted_params_err[2]/(fitted_params[2]**2):.2f} Hz"
f"Rate is {1./fitted_params[2]:.2f} +-"
f"{fitted_params_err[2]/(fitted_params[2]**2):.2f} Hz"
)
print(f"Expected run duration is {fitted_params[0]*fitted_params[2]:.2f} s")

fitted_rate.append(1.0 / fitted_params[2])

# plt.savefig(figurepath + 'deadtime_exponential_fit_nsb_run{}_newfit_cutoff.png'.format(run))
# plt.savefig(figurepath + 'deadtime_exponential_fit_nsb_run{}_newfit
# _cutoff.png'.format(run))

y = data_content
y_fit = fitter.expected_distribution(fitted_params)
Expand All @@ -236,13 +257,13 @@

parameter_R2_new_list.append(r2)

deadtime_from_fit = parameter_tau_new_list
deadtime_from_fit_err = parameter_tau_err_new_list
# deadtime_from_fit = parameter_tau_new_list
# deadtime_from_fit_err = parameter_tau_err_new_list
lambda_from_fit = parameter_lambda_new_list
lambda_from_fit_err = parameter_lambda_err_new_list
A2_from_fit = parameter_A2_new_list
A2_from_fit_err = parameter_A2_err_new_list
R2_from_fit = parameter_R2_new_list
# A2_from_fit = parameter_A2_new_list
# A2_from_fit_err = parameter_A2_err_new_list
# R2_from_fit = parameter_R2_new_list

#######################################
# PLOT
Expand All @@ -260,9 +281,9 @@
ids = np.array(ids)
runlist = np.array(runlist)

ratio_list = []
collected_rate = []
err = []
# ratio_list = []
# collected_rate = []
# err = []

for source in range(0, 3):
# runl = np.where(ids==source)[0]
Expand Down Expand Up @@ -399,15 +420,17 @@
ax1.errorbar(
collected_trigger_rate[runl] / 1000,
rate[runl],
# xerr=((df_mean_nsb[df_mean_nsb['Run']==run]['Collected_trigger_rate[Hz]_err']))/1000,
# xerr=((df_mean_nsb[df_mean_nsb['Run']==run]['Collected_trigger
# _rate[Hz]_err']))/1000,
yerr=rate_err[runl],
alpha=0.9,
ls=" ",
marker="o",
color=labels[source]["color"],
label=labels[source]["source"],
)
# label = 'Run {} ({} V)'.format(run, df_mean_rg[df_mean_rg['Run']==run]['Voltage[V]'].values[0]))
# label = 'Run {} ({} V)'.format(run, df_mean_rg[df_mean_rg['Run']=
# =run]['Voltage[V]'].values[0]))

ax1.legend(frameon=False, prop={"size": 10}, loc="upper left", ncol=1)

Expand All @@ -424,7 +447,7 @@
main()


##################################PREVIOUS###############################
# ##################################PREVIOUS###############################
# collected_rate = []


Expand All @@ -445,8 +468,10 @@

# for i, run in enumerate(runlist):
# deadtime_run, deadtime_bin_run, deadtime_err_run, deadtime_bin_length_run, \
# total_delta_t_for_busy_time, parameter_A_new, parameter_R_new, parameter_A_err_new, parameter_R_err_new, \
# first_bin_length, tot_nr_events_histo = deadtime_and_expo_fit(time_tot[i],deadtime_us[i], run)
# total_delta_t_for_busy_time, parameter_A_new, parameter_R_new, parameter_A_err_
# new, parameter_R_err_new, \
# first_bin_length, tot_nr_events_histo = deadtime_and_expo_fit(time_tot[i],deadt
# ime_us[i], run)
# total_delta_t_for_busy_time_list.append(total_delta_t_for_busy_time)
# parameter_A_new_list.append(parameter_A_new)
# parameter_R_new_list.append(parameter_R_new)
Expand All @@ -468,7 +493,8 @@
# rate_err = (np.array(parameter_R_err_new_list) * 1 / u.us).to(u.kHz).to_value()
# A_from_fit = (parameter_A_new_list)
# A_from_fit_err = (parameter_A_err_new_list)
# ucts_busy_rate = (np.array(busy_counter[:,-1]) / (np.array(time_tot) * u.s).to(u.s)).to(
# ucts_busy_rate = (np.array(busy_counter[:,-1]) / (np.array(time_tot) * u.s).to(u.s))
# .to(
# u.kHz).value
# nr_events_from_histo = (tot_nr_events_histo)
# first_bin_delta_t = first_bin_length
Expand All @@ -478,7 +504,7 @@
# deadtime_average_err_nsb = np.sqrt(1 / (np.sum(1 / deadtime_bin_length ** 2)))


# #######################################################################################
# ######################################################################################


# #B-TEL-1260
Expand All @@ -502,7 +528,8 @@

# ratio = rate[i]/freq
# ratio_list.append(np.array(ratio)*100)
# ratio_err = np.sqrt((rate_err[i]/freq)**2 + (freq_err*rate[i]/(freq**2)))
# ratio_err = np.sqrt((rate_err[i]/freq)**2 + (freq_err*rate[i]/
# (freq**2)))
# err.append(ratio_err*100)


Expand All @@ -512,7 +539,8 @@
# X_sorted = [x for y, x in sorted(zip(Y, X))]
# err_sorted = [err for y,err in sorted(zip(Y,err))]

# plt.errorbar(sorted(Y), X_sorted, yerr = err_sorted, alpha=0.6, ls='-', marker='o',color=labels[source]['color'], label = labels[source]['source'])
# plt.errorbar(sorted(Y), X_sorted, yerr = err_sorted, alpha=0.6, ls='-',
# marker='o',color=labels[source]['color'], label = labels[source]['source'])

# plt.xlabel('Collected Trigger Rate [kHz]')
# plt.ylabel(r'Deadtime [%]')
Expand Down Expand Up @@ -564,7 +592,8 @@
# ax1.plot(x, x, color='gray', ls='--', alpha=0.5)

# ax2.plot(x, np.zeros(len(x)), color='gray', ls='--', alpha=0.5)
# ax2.fill_between(x, np.ones(len(x))*(-10), np.ones(len(x))*(10), color='gray', alpha=0.1)
# ax2.fill_between(x, np.ones(len(x))*(-10), np.ones(len(x))*(10), color='gray',
# alpha=0.1)

# ax2.set_xlabel('Collected Trigger Rate [kHz]')
# ax1.set_ylabel(r'Rate from fit [kHz]')
Expand All @@ -581,11 +610,14 @@
# #print(collected_triger_rate[runl])
# ax1.errorbar(collected_triger_rate[runl]/1000,
# rate[runl],
# #xerr=((df_mean_nsb[df_mean_nsb['Run']==run]['Collected_trigger_rate[Hz]_err']))/1000,
# #xerr=((df_mean_nsb[df_mean_nsb['Run']==run]
# ['Collected_trigger_rate[Hz]_err']))/1000,
# yerr=rate_err[runl],
# alpha=0.9,
# ls=' ', marker='o', color=labels[source]['color'], label = labels[source]['source'])
# # label = 'Run {} ({} V)'.format(run, df_mean_rg[df_mean_rg['Run']==run]['Voltage[V]'].values[0]))
# ls=' ', marker='o', color=labels[source]['color'],
# label = labels[source]['source'])
# # label = 'Run {} ({} V)'.format(run, df_mean_rg[df_mean_rg['Run']==
# run]['Voltage[V]'].values[0]))

# ax1.legend(frameon=False, prop={'size':10},
# loc="upper left", ncol=1)
Expand Down
Loading