From dadf5f48699ca44c72df5c94e0feeb0751346dc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Erik=20Fr=C3=B6jdh?= Date: Wed, 12 Feb 2025 16:50:31 +0100 Subject: [PATCH] Added fitting, fixed roi etc (#129) Co-authored-by: Patrick Co-authored-by: JulianHeymes --- ...{deploy.yml => build_and_deploy_conda.yml} | 2 - .github/workflows/build_conda.yml | 40 ++ CMakeLists.txt | 47 ++- conda-recipe/meta.yaml | 2 +- docs/CMakeLists.txt | 23 +- docs/src/ClusterFinderMT.rst | 7 + docs/src/index.rst | 4 + docs/src/pyClusterVector.rst | 33 ++ docs/src/pyFit.rst | 19 + include/aare/CircularFifo.hpp | 97 +++++ include/aare/ClusterCollector.hpp | 52 +++ include/aare/ClusterFile.hpp | 72 +++- include/aare/ClusterFileSink.hpp | 56 +++ include/aare/ClusterFinder.hpp | 131 +------ include/aare/ClusterFinderMT.hpp | 268 ++++++++++++++ include/aare/ClusterVector.hpp | 159 ++++++-- include/aare/File.hpp | 2 + include/aare/Fit.hpp | 76 ++++ include/aare/Pedestal.hpp | 8 +- include/aare/ProducerConsumerQueue.hpp | 203 +++++++++++ include/aare/RawFile.hpp | 15 +- include/aare/RawMasterFile.hpp | 11 - include/aare/RawSubFile.hpp | 3 + include/aare/decode.hpp | 13 + include/aare/defs.hpp | 33 +- include/aare/geo_helpers.hpp | 16 + include/aare/utils/task.hpp | 8 + patches/lmfit.patch | 13 + pyproject.toml | 2 +- python/CMakeLists.txt | 12 +- python/aare/__init__.py | 10 +- python/aare/func.py | 1 + python/aare/transform.py | 12 +- python/aare/utils.py | 6 +- python/examples/fits.py | 79 ++++ python/examples/play.py | 86 +++-- python/src/cluster.hpp | 183 +++++++--- python/src/cluster_file.hpp | 38 +- python/src/ctb_raw_file.hpp | 42 +++ python/src/file.hpp | 40 +- python/src/fit.hpp | 223 ++++++++++++ python/src/module.cpp | 6 + python/src/np_helper.hpp | 66 +--- src/ClusterFile.cpp | 341 +++++++++--------- src/ClusterVector.test.cpp | 112 +++++- src/File.cpp | 2 + src/Fit.cpp | 300 +++++++++++++++ src/RawFile.cpp | 157 +++----- src/RawFile.test.cpp | 5 + src/RawSubFile.cpp | 50 ++- src/decode.cpp | 
61 ++++ src/geo_helpers.cpp | 71 ++++ src/geo_helpers.test.cpp | 230 ++++++++++++ src/utils/task.cpp | 30 ++ src/utils/task.test.cpp | 32 ++ 55 files changed, 2924 insertions(+), 686 deletions(-) rename .github/workflows/{deploy.yml => build_and_deploy_conda.yml} (94%) create mode 100644 .github/workflows/build_conda.yml create mode 100644 docs/src/ClusterFinderMT.rst create mode 100644 docs/src/pyClusterVector.rst create mode 100644 docs/src/pyFit.rst create mode 100644 include/aare/CircularFifo.hpp create mode 100644 include/aare/ClusterCollector.hpp create mode 100644 include/aare/ClusterFileSink.hpp create mode 100644 include/aare/ClusterFinderMT.hpp create mode 100644 include/aare/Fit.hpp create mode 100644 include/aare/ProducerConsumerQueue.hpp create mode 100644 include/aare/decode.hpp create mode 100644 include/aare/geo_helpers.hpp create mode 100644 include/aare/utils/task.hpp create mode 100644 patches/lmfit.patch create mode 100644 python/aare/func.py create mode 100644 python/examples/fits.py create mode 100644 python/src/fit.hpp create mode 100644 src/Fit.cpp create mode 100644 src/decode.cpp create mode 100644 src/geo_helpers.cpp create mode 100644 src/geo_helpers.test.cpp create mode 100644 src/utils/task.cpp create mode 100644 src/utils/task.test.cpp diff --git a/.github/workflows/deploy.yml b/.github/workflows/build_and_deploy_conda.yml similarity index 94% rename from .github/workflows/deploy.yml rename to .github/workflows/build_and_deploy_conda.yml index 81edde3f..90e75c13 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/build_and_deploy_conda.yml @@ -4,7 +4,6 @@ on: push: branches: - main - - developer jobs: build: @@ -34,7 +33,6 @@ jobs: run: conda install conda-build=24.9 conda-verify pytest anaconda-client - name: Enable upload - if: github.ref == 'refs/heads/main' run: conda config --set anaconda_upload yes - name: Build diff --git a/.github/workflows/build_conda.yml b/.github/workflows/build_conda.yml new file mode 100644 
index 00000000..0b3e55c6 --- /dev/null +++ b/.github/workflows/build_conda.yml @@ -0,0 +1,40 @@ +name: Build pkgs and deploy if on main + +on: + push: + branches: + - developer + +jobs: + build: + strategy: + fail-fast: false + matrix: + platform: [ubuntu-latest, ] # macos-12, windows-2019] + python-version: ["3.12",] + + runs-on: ${{ matrix.platform }} + + # The setup-miniconda action needs this to activate miniconda + defaults: + run: + shell: "bash -l {0}" + + steps: + - uses: actions/checkout@v4 + + - name: Get conda + uses: conda-incubator/setup-miniconda@v3.0.4 + with: + python-version: ${{ matrix.python-version }} + channels: conda-forge + + - name: Prepare + run: conda install conda-build=24.9 conda-verify pytest anaconda-client + + - name: Disable upload + run: conda config --set anaconda_upload no + + - name: Build + run: conda build conda-recipe + diff --git a/CMakeLists.txt b/CMakeLists.txt index cd1cd94a..62a3878b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -48,6 +48,7 @@ option(AARE_FETCH_PYBIND11 "Use FetchContent to download pybind11" ON) option(AARE_FETCH_CATCH "Use FetchContent to download catch2" ON) option(AARE_FETCH_JSON "Use FetchContent to download nlohmann::json" ON) option(AARE_FETCH_ZMQ "Use FetchContent to download libzmq" ON) +option(AARE_FETCH_LMFIT "Use FetchContent to download lmfit" ON) #Convenience option to use system libraries only (no FetchContent) @@ -76,6 +77,34 @@ endif() set(CMAKE_EXPORT_COMPILE_COMMANDS ON) +if(AARE_FETCH_LMFIT) + set(lmfit_patch git apply ${CMAKE_CURRENT_SOURCE_DIR}/patches/lmfit.patch) + FetchContent_Declare( + lmfit + GIT_REPOSITORY https://jugit.fz-juelich.de/mlz/lmfit.git + GIT_TAG main + PATCH_COMMAND ${lmfit_patch} + UPDATE_DISCONNECTED 1 + EXCLUDE_FROM_ALL + ) + #Disable what we don't need from lmfit + set(BUILD_TESTING OFF CACHE BOOL "") + set(LMFIT_CPPTEST OFF CACHE BOOL "") + set(LIB_MAN OFF CACHE BOOL "") + set(LMFIT_CPPTEST OFF CACHE BOOL "") + set(BUILD_SHARED_LIBS OFF CACHE BOOL "") + 
+ + FetchContent_MakeAvailable(lmfit) + set_property(TARGET lmfit PROPERTY POSITION_INDEPENDENT_CODE ON) + + target_include_directories (lmfit PUBLIC "${libzmq_SOURCE_DIR}/lib") + message(STATUS "lmfit include dir: ${lmfit_SOURCE_DIR}/lib") +else() + find_package(lmfit REQUIRED) +endif() + + if(AARE_FETCH_ZMQ) # Fetchcontent_Declare is deprecated need to find a way to update this # for now setting the policy to old is enough @@ -127,8 +156,8 @@ if (AARE_FETCH_FMT) LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} - INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} -) + INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + ) else() find_package(fmt 6 REQUIRED) endif() @@ -146,7 +175,6 @@ if (AARE_FETCH_JSON) install( TARGETS nlohmann_json EXPORT "${TARGETS_EXPORT_NAME}" - ) message(STATUS "target: ${NLOHMANN_JSON_TARGET_NAME}") else() @@ -283,11 +311,14 @@ set(PUBLICHEADERS include/aare/ClusterFile.hpp include/aare/CtbRawFile.hpp include/aare/ClusterVector.hpp + include/aare/decode.hpp include/aare/defs.hpp include/aare/Dtype.hpp include/aare/File.hpp + include/aare/Fit.hpp include/aare/FileInterface.hpp include/aare/Frame.hpp + include/aare/geo_helpers.hpp include/aare/NDArray.hpp include/aare/NDView.hpp include/aare/NumpyFile.hpp @@ -298,6 +329,7 @@ set(PUBLICHEADERS include/aare/RawMasterFile.hpp include/aare/RawSubFile.hpp include/aare/VarClusterFinder.hpp + include/aare/utils/task.hpp ) @@ -307,14 +339,18 @@ set(SourceFiles ${CMAKE_CURRENT_SOURCE_DIR}/src/ClusterFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/decode.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/File.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/Fit.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.cpp 
${CMAKE_CURRENT_SOURCE_DIR}/src/PixelMap.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawSubFile.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.cpp ) @@ -334,6 +370,7 @@ target_link_libraries( ${STD_FS_LIB} # from helpers.cmake PRIVATE aare_compiler_flags + lmfit ) set_target_properties(aare_core PROPERTIES @@ -350,6 +387,7 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/defs.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Dtype.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/Frame.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/geo_helpers.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawMasterFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NDArray.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NDView.test.cpp @@ -359,6 +397,7 @@ if(AARE_TESTS) ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyFile.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/NumpyHelpers.test.cpp ${CMAKE_CURRENT_SOURCE_DIR}/src/RawFile.test.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/src/utils/task.test.cpp ) target_sources(tests PRIVATE ${TestSources} ) @@ -436,4 +475,4 @@ if(AARE_MASTER_PROJECT) set(CMAKE_INSTALL_DIR "share/cmake/${PROJECT_NAME}") set(PROJECT_LIBRARIES aare-core aare-compiler-flags ) include(cmake/package_config.cmake) -endif() \ No newline at end of file +endif() diff --git a/conda-recipe/meta.yaml b/conda-recipe/meta.yaml index c3c823b5..c405e90e 100644 --- a/conda-recipe/meta.yaml +++ b/conda-recipe/meta.yaml @@ -1,6 +1,6 @@ package: name: aare - version: 2024.12.16.dev0 #TODO! how to not duplicate this? + version: 2025.2.12 #TODO! how to not duplicate this? 
source: diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt index 118fd5ce..c693f0ec 100644 --- a/docs/CMakeLists.txt +++ b/docs/CMakeLists.txt @@ -12,28 +12,7 @@ set(SPHINX_BUILD ${CMAKE_CURRENT_BINARY_DIR}) file(GLOB SPHINX_SOURCE_FILES CONFIGURE_DEPENDS "src/*.rst") -# set(SPHINX_SOURCE_FILES -# src/index.rst -# src/Installation.rst -# src/Requirements.rst -# src/NDArray.rst -# src/NDView.rst -# src/File.rst -# src/Frame.rst -# src/Dtype.rst -# src/ClusterFinder.rst -# src/ClusterFile.rst -# src/Pedestal.rst -# src/RawFile.rst -# src/RawSubFile.rst -# src/RawMasterFile.rst -# src/VarClusterFinder.rst -# src/pyVarClusterFinder.rst -# src/pyFile.rst -# src/pyCtbRawFile.rst -# src/pyRawFile.rst -# src/pyRawMasterFile.rst -# ) + foreach(filename ${SPHINX_SOURCE_FILES}) diff --git a/docs/src/ClusterFinderMT.rst b/docs/src/ClusterFinderMT.rst new file mode 100644 index 00000000..b15eb8b8 --- /dev/null +++ b/docs/src/ClusterFinderMT.rst @@ -0,0 +1,7 @@ +ClusterFinderMT +================== + + +.. doxygenclass:: aare::ClusterFinderMT + :members: + :undoc-members: \ No newline at end of file diff --git a/docs/src/index.rst b/docs/src/index.rst index 4316a2cc..905caea8 100644 --- a/docs/src/index.rst +++ b/docs/src/index.rst @@ -30,10 +30,13 @@ AARE pyFile pyCtbRawFile pyClusterFile + pyClusterVector pyRawFile pyRawMasterFile pyVarClusterFinder + pyFit + .. toctree:: :caption: C++ API @@ -45,6 +48,7 @@ AARE File Dtype ClusterFinder + ClusterFinderMT ClusterFile ClusterVector Pedestal diff --git a/docs/src/pyClusterVector.rst b/docs/src/pyClusterVector.rst new file mode 100644 index 00000000..4277920f --- /dev/null +++ b/docs/src/pyClusterVector.rst @@ -0,0 +1,33 @@ +ClusterVector +================ + +The ClusterVector, holds clusters from the ClusterFinder. Since it is templated +in C++ we use a suffix indicating the data type in python. The suffix is +``_i`` for integer, ``_f`` for float, and ``_d`` for double. 
+ +At the moment the functionality from python is limited and it is not supported +to push_back clusters to the vector. The intended use case is to pass it to +C++ functions that support the ClusterVector or to view it as a numpy array. + +**View ClusterVector as numpy array** + +.. code:: python + + from aare import ClusterFile + with ClusterFile("path/to/file") as f: + cluster_vector = f.read_frame() + + # Create a copy of the cluster data in a numpy array + clusters = np.array(cluster_vector) + + # Avoid copying the data by passing copy=False + clusters = np.array(cluster_vector, copy = False) + + +.. py:currentmodule:: aare + +.. autoclass:: ClusterVector_i + :members: + :undoc-members: + :show-inheritance: + :inherited-members: \ No newline at end of file diff --git a/docs/src/pyFit.rst b/docs/src/pyFit.rst new file mode 100644 index 00000000..abaa3cf8 --- /dev/null +++ b/docs/src/pyFit.rst @@ -0,0 +1,19 @@ + +Fit +======== + +.. py:currentmodule:: aare + + +**Functions** + +.. autofunction:: gaus + +.. autofunction:: pol1 + + +**Fitting** + +.. autofunction:: fit_gaus + +.. autofunction:: fit_pol1 \ No newline at end of file diff --git a/include/aare/CircularFifo.hpp b/include/aare/CircularFifo.hpp new file mode 100644 index 00000000..80980829 --- /dev/null +++ b/include/aare/CircularFifo.hpp @@ -0,0 +1,97 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include "aare/ProducerConsumerQueue.hpp" + +namespace aare { + +template class CircularFifo { + uint32_t fifo_size; + aare::ProducerConsumerQueue free_slots; + aare::ProducerConsumerQueue filled_slots; + + public: + CircularFifo() : CircularFifo(100){}; + CircularFifo(uint32_t size) : fifo_size(size), free_slots(size + 1), filled_slots(size + 1) { + + // TODO! how do we deal with alignment for writing? alignas??? + // Do we give the user a chance to provide memory locations? + // Templated allocator? 
+ for (size_t i = 0; i < fifo_size; ++i) { + free_slots.write(ItemType{}); + } + } + + bool next() { + // TODO! avoid default constructing ItemType + ItemType it; + if (!filled_slots.read(it)) + return false; + if (!free_slots.write(std::move(it))) + return false; + return true; + } + + ~CircularFifo() {} + + using value_type = ItemType; + + auto numFilledSlots() const noexcept { return filled_slots.sizeGuess(); } + auto numFreeSlots() const noexcept { return free_slots.sizeGuess(); } + auto isFull() const noexcept { return filled_slots.isFull(); } + + ItemType pop_free() { + ItemType v; + while (!free_slots.read(v)) + ; + return std::move(v); + // return v; + } + + bool try_pop_free(ItemType &v) { return free_slots.read(v); } + + ItemType pop_value(std::chrono::nanoseconds wait, std::atomic &stopped) { + ItemType v; + while (!filled_slots.read(v) && !stopped) { + std::this_thread::sleep_for(wait); + } + return std::move(v); + } + + ItemType pop_value() { + ItemType v; + while (!filled_slots.read(v)) + ; + return std::move(v); + } + + ItemType *frontPtr() { return filled_slots.frontPtr(); } + + // TODO! 
Add function to move item from filled to free to be used + // with the frontPtr function + + template void push_value(Args &&...recordArgs) { + while (!filled_slots.write(std::forward(recordArgs)...)) + ; + } + + template bool try_push_value(Args &&...recordArgs) { + return filled_slots.write(std::forward(recordArgs)...); + } + + template void push_free(Args &&...recordArgs) { + while (!free_slots.write(std::forward(recordArgs)...)) + ; + } + + template bool try_push_free(Args &&...recordArgs) { + return free_slots.write(std::forward(recordArgs)...); + } +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterCollector.hpp b/include/aare/ClusterCollector.hpp new file mode 100644 index 00000000..07380623 --- /dev/null +++ b/include/aare/ClusterCollector.hpp @@ -0,0 +1,52 @@ +#pragma once +#include +#include + +#include "aare/ProducerConsumerQueue.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/ClusterFinderMT.hpp" + +namespace aare { + +class ClusterCollector{ + ProducerConsumerQueue>* m_source; + std::atomic m_stop_requested{false}; + std::atomic m_stopped{true}; + std::chrono::milliseconds m_default_wait{1}; + std::thread m_thread; + std::vector> m_clusters; + + void process(){ + m_stopped = false; + fmt::print("ClusterCollector started\n"); + while (!m_stop_requested || !m_source->isEmpty()) { + if (ClusterVector *clusters = m_source->frontPtr(); + clusters != nullptr) { + m_clusters.push_back(std::move(*clusters)); + m_source->popFront(); + }else{ + std::this_thread::sleep_for(m_default_wait); + } + } + fmt::print("ClusterCollector stopped\n"); + m_stopped = true; + } + + public: + ClusterCollector(ClusterFinderMT* source){ + m_source = source->sink(); + m_thread = std::thread(&ClusterCollector::process, this); + } + void stop(){ + m_stop_requested = true; + m_thread.join(); + } + std::vector> steal_clusters(){ + if(!m_stopped){ + throw std::runtime_error("ClusterCollector is still running"); + } + return 
std::move(m_clusters); + } +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFile.hpp b/include/aare/ClusterFile.hpp index a484560f..b7967638 100644 --- a/include/aare/ClusterFile.hpp +++ b/include/aare/ClusterFile.hpp @@ -33,6 +33,12 @@ typedef enum { pTopRight = 8 } pixel; +struct Eta2 { + double x; + double y; + corner c; +}; + struct ClusterAnalysis { uint32_t c; int32_t tot; @@ -49,6 +55,19 @@ int32_t frame_number uint32_t number_of_clusters .... */ + +/** + * @brief Class to read and write cluster files + * Expects data to be laid out as: + * + * + * int32_t frame_number + * uint32_t number_of_clusters + * int16_t x, int16_t y, int32_t data[9] x number_of_clusters + * int32_t frame_number + * uint32_t number_of_clusters + * etc. + */ class ClusterFile { FILE *fp{}; uint32_t m_num_left{}; @@ -56,26 +75,61 @@ class ClusterFile { const std::string m_mode; public: + /** + * @brief Construct a new Cluster File object + * @param fname path to the file + * @param chunk_size number of clusters to read at a time when iterating + * over the file + * @param mode mode to open the file in. "r" for reading, "w" for writing, + * "a" for appending + * @throws std::runtime_error if the file could not be opened + */ ClusterFile(const std::filesystem::path &fname, size_t chunk_size = 1000, const std::string &mode = "r"); + + ~ClusterFile(); - std::vector read_clusters(size_t n_clusters); - std::vector read_frame(int32_t &out_fnum); - void write_frame(int32_t frame_number, - const ClusterVector &clusters); - std::vector - read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny); + /** + * @brief Read n_clusters clusters from the file discarding frame numbers. + * If EOF is reached the returned vector will have less than n_clusters + * clusters + */ + ClusterVector read_clusters(size_t n_clusters); + + /** + * @brief Read a single frame from the file and return the clusters. 
The + * cluster vector will have the frame number set. + * @throws std::runtime_error if the file is not opened for reading or the file pointer not + * at the beginning of a frame + */ + ClusterVector read_frame(); + + + void write_frame(const ClusterVector &clusters); + + // Need to be migrated to support NDArray and return a ClusterVector + // std::vector + // read_cluster_with_cut(size_t n_clusters, double *noise_map, int nx, int ny); + + /** + * @brief Return the chunk size + */ size_t chunk_size() const { return m_chunk_size; } + + + /** + * @brief Close the file. If not closed the file will be closed in the destructor + */ void close(); }; int analyze_data(int32_t *data, int32_t *t2, int32_t *t3, char *quad, double *eta2x, double *eta2y, double *eta3x, double *eta3y); -int analyze_cluster(Cluster3x3& cl, int32_t *t2, int32_t *t3, char *quad, +int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, double *eta2x, double *eta2y, double *eta3x, double *eta3y); -NDArray calculate_eta2( ClusterVector& clusters); -std::array calculate_eta2( Cluster3x3& cl); +NDArray calculate_eta2(ClusterVector &clusters); +Eta2 calculate_eta2(Cluster3x3 &cl); } // namespace aare diff --git a/include/aare/ClusterFileSink.hpp b/include/aare/ClusterFileSink.hpp new file mode 100644 index 00000000..158fdebb --- /dev/null +++ b/include/aare/ClusterFileSink.hpp @@ -0,0 +1,56 @@ +#pragma once +#include +#include +#include + +#include "aare/ProducerConsumerQueue.hpp" +#include "aare/ClusterVector.hpp" +#include "aare/ClusterFinderMT.hpp" + +namespace aare{ + +class ClusterFileSink{ + ProducerConsumerQueue>* m_source; + std::atomic m_stop_requested{false}; + std::atomic m_stopped{true}; + std::chrono::milliseconds m_default_wait{1}; + std::thread m_thread; + std::ofstream m_file; + + + void process(){ + m_stopped = false; + fmt::print("ClusterFileSink started\n"); + while (!m_stop_requested || !m_source->isEmpty()) { + if (ClusterVector *clusters = m_source->frontPtr(); + 
clusters != nullptr) { + // Write clusters to file + int32_t frame_number = clusters->frame_number(); //TODO! Should we store frame number already as int? + uint32_t num_clusters = clusters->size(); + m_file.write(reinterpret_cast(&frame_number), sizeof(frame_number)); + m_file.write(reinterpret_cast(&num_clusters), sizeof(num_clusters)); + m_file.write(reinterpret_cast(clusters->data()), clusters->size() * clusters->item_size()); + m_source->popFront(); + }else{ + std::this_thread::sleep_for(m_default_wait); + } + } + fmt::print("ClusterFileSink stopped\n"); + m_stopped = true; + } + + public: + ClusterFileSink(ClusterFinderMT* source, const std::filesystem::path& fname){ + m_source = source->sink(); + m_thread = std::thread(&ClusterFileSink::process, this); + m_file.open(fname, std::ios::binary); + } + void stop(){ + m_stop_requested = true; + m_thread.join(); + m_file.close(); + } +}; + + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFinder.hpp b/include/aare/ClusterFinder.hpp index 8bd77ccd..84b207b3 100644 --- a/include/aare/ClusterFinder.hpp +++ b/include/aare/ClusterFinder.hpp @@ -10,26 +10,12 @@ namespace aare { -/** enum to define the event types */ -enum class eventType { - PEDESTAL, /** pedestal */ - NEIGHBOUR, /** neighbour i.e. below threshold, but in the cluster of a - photon */ - PHOTON, /** photon i.e. 
above threshold */ - PHOTON_MAX, /** maximum of a cluster satisfying the photon conditions */ - NEGATIVE_PEDESTAL, /** negative value, will not be accounted for as pedestal - in order to avoid drift of the pedestal towards - negative values */ - UNDEFINED_EVENT = -1 /** undefined */ -}; - template class ClusterFinder { Shape<2> m_image_size; const int m_cluster_sizeX; const int m_cluster_sizeY; - // const PEDESTAL_TYPE m_threshold; const PEDESTAL_TYPE m_nSigma; const PEDESTAL_TYPE c2; const PEDESTAL_TYPE c3; @@ -61,6 +47,7 @@ class ClusterFinder { NDArray pedestal() { return m_pedestal.mean(); } NDArray noise() { return m_pedestal.std(); } + void clear_pedestal() { m_pedestal.clear(); } /** * @brief Move the clusters from the ClusterVector in the ClusterFinder to a @@ -78,13 +65,13 @@ class ClusterFinder { m_clusters = ClusterVector(m_cluster_sizeX, m_cluster_sizeY); return tmp; } - void find_clusters(NDView frame) { + void find_clusters(NDView frame, uint64_t frame_number = 0) { // // TODO! 
deal with even size clusters // // currently 3,3 -> +/- 1 // // 4,4 -> +/- 2 int dy = m_cluster_sizeY / 2; int dx = m_cluster_sizeX / 2; - + m_clusters.set_frame_number(frame_number); std::vector cluster_data(m_cluster_sizeX * m_cluster_sizeY); for (int iy = 0; iy < frame.shape(0); iy++) { for (int ix = 0; ix < frame.shape(1); ix++) { @@ -121,8 +108,8 @@ class ClusterFinder { } else if (total > c3 * m_nSigma * rms) { // pass } else { - // m_pedestal.push(iy, ix, frame(iy, ix)); - m_pedestal.push_fast(iy, ix, frame(iy, ix)); + // m_pedestal.push(iy, ix, frame(iy, ix)); // Safe option + m_pedestal.push_fast(iy, ix, frame(iy, ix)); // Assume we have reached n_samples in the pedestal, slight performance improvement continue; // It was a pedestal value nothing to store } @@ -157,114 +144,6 @@ class ClusterFinder { } } } - - // // template - // std::vector - // find_clusters_with_threshold(NDView frame, - // Pedestal &pedestal) { - // assert(m_threshold > 0); - // std::vector clusters; - // std::vector> eventMask; - // for (int i = 0; i < frame.shape(0); i++) { - // eventMask.push_back(std::vector(frame.shape(1))); - // } - // double tthr, tthr1, tthr2; - - // NDArray rest({frame.shape(0), frame.shape(1)}); - // NDArray nph({frame.shape(0), frame.shape(1)}); - // // convert to n photons - // // nph = (frame-pedestal.mean()+0.5*m_threshold)/m_threshold; // can - // be - // // optimized with expression templates? - // for (int iy = 0; iy < frame.shape(0); iy++) { - // for (int ix = 0; ix < frame.shape(1); ix++) { - // auto val = frame(iy, ix) - pedestal.mean(iy, ix); - // nph(iy, ix) = (val + 0.5 * m_threshold) / m_threshold; - // nph(iy, ix) = nph(iy, ix) < 0 ? 
0 : nph(iy, ix); - // rest(iy, ix) = val - nph(iy, ix) * m_threshold; - // } - // } - // // iterate over frame pixels - // for (int iy = 0; iy < frame.shape(0); iy++) { - // for (int ix = 0; ix < frame.shape(1); ix++) { - // eventMask[iy][ix] = eventType::PEDESTAL; - // // initialize max and total - // FRAME_TYPE max = std::numeric_limits::min(); - // long double total = 0; - // if (rest(iy, ix) <= 0.25 * m_threshold) { - // pedestal.push(iy, ix, frame(iy, ix)); - // continue; - // } - // eventMask[iy][ix] = eventType::NEIGHBOUR; - // // iterate over cluster pixels around the current pixel - // (ix,iy) for (short ir = -(m_cluster_sizeY / 2); - // ir < (m_cluster_sizeY / 2) + 1; ir++) { - // for (short ic = -(m_cluster_sizeX / 2); - // ic < (m_cluster_sizeX / 2) + 1; ic++) { - // if (ix + ic >= 0 && ix + ic < frame.shape(1) && - // iy + ir >= 0 && iy + ir < frame.shape(0)) { - // auto val = frame(iy + ir, ix + ic) - - // pedestal.mean(iy + ir, ix + ic); - // total += val; - // if (val > max) { - // max = val; - // } - // } - // } - // } - - // auto rms = pedestal.std(iy, ix); - // if (m_nSigma == 0) { - // tthr = m_threshold; - // tthr1 = m_threshold; - // tthr2 = m_threshold; - // } else { - // tthr = m_nSigma * rms; - // tthr1 = m_nSigma * rms * c3; - // tthr2 = m_nSigma * rms * c2; - - // if (m_threshold > 2 * tthr) - // tthr = m_threshold - tthr; - // if (m_threshold > 2 * tthr1) - // tthr1 = tthr - tthr1; - // if (m_threshold > 2 * tthr2) - // tthr2 = tthr - tthr2; - // } - // if (total > tthr1 || max > tthr) { - // eventMask[iy][ix] = eventType::PHOTON; - // nph(iy, ix) += 1; - // rest(iy, ix) -= m_threshold; - // } else { - // pedestal.push(iy, ix, frame(iy, ix)); - // continue; - // } - // if (eventMask[iy][ix] == eventType::PHOTON && - // frame(iy, ix) - pedestal.mean(iy, ix) >= max) { - // eventMask[iy][ix] = eventType::PHOTON_MAX; - // DynamicCluster cluster(m_cluster_sizeX, m_cluster_sizeY, - // Dtype(typeid(FRAME_TYPE))); - // cluster.x = ix; - // 
cluster.y = iy; - // short i = 0; - // for (short ir = -(m_cluster_sizeY / 2); - // ir < (m_cluster_sizeY / 2) + 1; ir++) { - // for (short ic = -(m_cluster_sizeX / 2); - // ic < (m_cluster_sizeX / 2) + 1; ic++) { - // if (ix + ic >= 0 && ix + ic < frame.shape(1) && - // iy + ir >= 0 && iy + ir < frame.shape(0)) { - // auto tmp = frame(iy + ir, ix + ic) - - // pedestal.mean(iy + ir, ix + ic); - // cluster.set(i, tmp); - // i++; - // } - // } - // } - // clusters.push_back(cluster); - // } - // } - // } - // return clusters; - // } }; } // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterFinderMT.hpp b/include/aare/ClusterFinderMT.hpp new file mode 100644 index 00000000..1efb8439 --- /dev/null +++ b/include/aare/ClusterFinderMT.hpp @@ -0,0 +1,268 @@ +#pragma once +#include +#include +#include +#include +#include + +#include "aare/ClusterFinder.hpp" +#include "aare/NDArray.hpp" +#include "aare/ProducerConsumerQueue.hpp" + +namespace aare { + +enum class FrameType { + DATA, + PEDESTAL, +}; + +struct FrameWrapper { + FrameType type; + uint64_t frame_number; + NDArray data; +}; + +/** + * @brief ClusterFinderMT is a multi-threaded version of ClusterFinder. It uses + * a producer-consumer queue to distribute the frames to the threads. The + * clusters are collected in a single output queue. 
+ * @tparam FRAME_TYPE type of the frame data + * @tparam PEDESTAL_TYPE type of the pedestal data + * @tparam CT type of the cluster data + */ +template +class ClusterFinderMT { + size_t m_current_thread{0}; + size_t m_n_threads{0}; + using Finder = ClusterFinder; + using InputQueue = ProducerConsumerQueue; + using OutputQueue = ProducerConsumerQueue>; + std::vector> m_input_queues; + std::vector> m_output_queues; + + OutputQueue m_sink{1000}; // All clusters go into this queue + + std::vector> m_cluster_finders; + std::vector m_threads; + std::thread m_collect_thread; + std::chrono::milliseconds m_default_wait{1}; + + std::atomic m_stop_requested{false}; + std::atomic m_processing_threads_stopped{true}; + + /** + * @brief Function called by the processing threads. It reads the frames + * from the input queue and processes them. + */ + void process(int thread_id) { + auto cf = m_cluster_finders[thread_id].get(); + auto q = m_input_queues[thread_id].get(); + bool realloc_same_capacity = true; + + while (!m_stop_requested || !q->isEmpty()) { + if (FrameWrapper *frame = q->frontPtr(); frame != nullptr) { + + switch (frame->type) { + case FrameType::DATA: + cf->find_clusters(frame->data.view(), frame->frame_number); + m_output_queues[thread_id]->write(cf->steal_clusters(realloc_same_capacity)); + break; + + case FrameType::PEDESTAL: + m_cluster_finders[thread_id]->push_pedestal_frame( + frame->data.view()); + break; + } + + // frame is processed now discard it + m_input_queues[thread_id]->popFront(); + } else { + std::this_thread::sleep_for(m_default_wait); + } + } + } + + /** + * @brief Collect all the clusters from the output queues and write them to + * the sink + */ + void collect() { + bool empty = true; + while (!m_stop_requested || !empty || !m_processing_threads_stopped) { + empty = true; + for (auto &queue : m_output_queues) { + if (!queue->isEmpty()) { + + while (!m_sink.write(std::move(*queue->frontPtr()))) { + std::this_thread::sleep_for(m_default_wait); + 
} + queue->popFront(); + empty = false; + } + } + } + } + + public: + /** + * @brief Construct a new ClusterFinderMT object + * @param image_size size of the image + * @param cluster_size size of the cluster + * @param nSigma number of sigma above the pedestal to consider a photon + * @param capacity initial capacity of the cluster vector. Should match + * expected number of clusters in a frame per frame. + * @param n_threads number of threads to use + */ + ClusterFinderMT(Shape<2> image_size, Shape<2> cluster_size, + PEDESTAL_TYPE nSigma = 5.0, size_t capacity = 2000, + size_t n_threads = 3) + : m_n_threads(n_threads) { + for (size_t i = 0; i < n_threads; i++) { + m_cluster_finders.push_back( + std::make_unique>( + image_size, cluster_size, nSigma, capacity)); + } + for (size_t i = 0; i < n_threads; i++) { + m_input_queues.emplace_back(std::make_unique(200)); + m_output_queues.emplace_back(std::make_unique(200)); + } + //TODO! Should we start automatically? + start(); + } + + /** + * @brief Return the sink queue where all the clusters are collected + * @warning You need to empty this queue otherwise the cluster finder will wait forever + */ + ProducerConsumerQueue> *sink() { return &m_sink; } + + /** + * @brief Start all processing threads + */ + void start() { + m_processing_threads_stopped = false; + m_stop_requested = false; + + for (size_t i = 0; i < m_n_threads; i++) { + m_threads.push_back( + std::thread(&ClusterFinderMT::process, this, i)); + } + + m_collect_thread = std::thread(&ClusterFinderMT::collect, this); + } + + /** + * @brief Stop all processing threads + */ + void stop() { + m_stop_requested = true; + + for (auto &thread : m_threads) { + thread.join(); + } + m_threads.clear(); + + m_processing_threads_stopped = true; + m_collect_thread.join(); + } + + /** + * @brief Wait for all the queues to be empty. Mostly used for timing tests. 
+ */ + void sync() { + for (auto &q : m_input_queues) { + while (!q->isEmpty()) { + std::this_thread::sleep_for(m_default_wait); + } + } + for (auto &q : m_output_queues) { + while (!q->isEmpty()) { + std::this_thread::sleep_for(m_default_wait); + } + } + while (!m_sink.isEmpty()) { + std::this_thread::sleep_for(m_default_wait); + } + } + + /** + * @brief Push a pedestal frame to all the cluster finders. The frames is + * expected to be dark. No photon finding is done. Just pedestal update. + */ + void push_pedestal_frame(NDView frame) { + FrameWrapper fw{FrameType::PEDESTAL, 0, + NDArray(frame)}; // TODO! copies the data! + + for (auto &queue : m_input_queues) { + while (!queue->write(fw)) { + std::this_thread::sleep_for(m_default_wait); + } + } + } + + /** + * @brief Push the frame to the queue of the next available thread. Function + * returns once the frame is in a queue. + * @note Spin locks with a default wait if the queue is full. + */ + void find_clusters(NDView frame, uint64_t frame_number = 0) { + FrameWrapper fw{FrameType::DATA, frame_number, + NDArray(frame)}; // TODO! copies the data! 
+ while (!m_input_queues[m_current_thread % m_n_threads]->write(fw)) { + std::this_thread::sleep_for(m_default_wait); + } + m_current_thread++; + } + + void clear_pedestal() { + if (!m_processing_threads_stopped) { + throw std::runtime_error("ClusterFinderMT is still running"); + } + for (auto &cf : m_cluster_finders) { + cf->clear_pedestal(); + } + } + + /** + * @brief Return the pedestal currently used by the cluster finder + * @param thread_index index of the thread + */ + auto pedestal(size_t thread_index = 0) { + if (m_cluster_finders.empty()) { + throw std::runtime_error("No cluster finders available"); + } + if (!m_processing_threads_stopped) { + throw std::runtime_error("ClusterFinderMT is still running"); + } + if (thread_index >= m_cluster_finders.size()) { + throw std::runtime_error("Thread index out of range"); + } + return m_cluster_finders[thread_index]->pedestal(); + } + + /** + * @brief Return the noise currently used by the cluster finder + * @param thread_index index of the thread + */ + auto noise(size_t thread_index = 0) { + if (m_cluster_finders.empty()) { + throw std::runtime_error("No cluster finders available"); + } + if (!m_processing_threads_stopped) { + throw std::runtime_error("ClusterFinderMT is still running"); + } + if (thread_index >= m_cluster_finders.size()) { + throw std::runtime_error("Thread index out of range"); + } + return m_cluster_finders[thread_index]->noise(); + } + + // void push(FrameWrapper&& frame) { + // //TODO! 
need to loop until we are successful + // auto rc = m_input_queue.write(std::move(frame)); + // fmt::print("pushed frame {}\n", rc); + // } +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/ClusterVector.hpp b/include/aare/ClusterVector.hpp index 98d4b373..febf06c8 100644 --- a/include/aare/ClusterVector.hpp +++ b/include/aare/ClusterVector.hpp @@ -1,4 +1,6 @@ #pragma once +#include +#include #include #include #include @@ -9,19 +11,24 @@ namespace aare { /** - * @brief ClusterVector is a container for clusters of various sizes. It uses a - * contiguous memory buffer to store the clusters. + * @brief ClusterVector is a container for clusters of various sizes. It uses a + * contiguous memory buffer to store the clusters. It is templated on the data + * type and the coordinate type of the clusters. * @note push_back can invalidate pointers to elements in the container + * @warning ClusterVector is currently move only to catch unintended copies, but + * this might change since there are probably use cases where copying is needed. * @tparam T data type of the pixels in the cluster - * @tparam CoordType data type of the x and y coordinates of the cluster (normally int16_t) + * @tparam CoordType data type of the x and y coordinates of the cluster + * (normally int16_t) */ -template class ClusterVector { +template class ClusterVector { using value_type = T; size_t m_cluster_size_x; size_t m_cluster_size_y; std::byte *m_data{}; size_t m_size{0}; size_t m_capacity; + uint64_t m_frame_number{0}; // TODO! 
Check frame number size and type /* Format string used in the python bindings to create a numpy array from the buffer @@ -30,7 +37,7 @@ template class ClusterVector { d - double i - int */ - constexpr static char m_fmt_base[] = "=h:x:\nh:y:\n({},{}){}:data:" ; + constexpr static char m_fmt_base[] = "=h:x:\nh:y:\n({},{}){}:data:"; public: /** @@ -38,30 +45,31 @@ template class ClusterVector { * @param cluster_size_x size of the cluster in x direction * @param cluster_size_y size of the cluster in y direction * @param capacity initial capacity of the buffer in number of clusters + * @param frame_number frame number of the clusters. Default is 0, which is + * also used to indicate that the clusters come from many frames */ - ClusterVector(size_t cluster_size_x, size_t cluster_size_y, - size_t capacity = 1024) + ClusterVector(size_t cluster_size_x = 3, size_t cluster_size_y = 3, + size_t capacity = 1024, uint64_t frame_number = 0) : m_cluster_size_x(cluster_size_x), m_cluster_size_y(cluster_size_y), - m_capacity(capacity) { + m_capacity(capacity), m_frame_number(frame_number) { allocate_buffer(capacity); } - ~ClusterVector() { - delete[] m_data; - } - - //Move constructor + ~ClusterVector() { delete[] m_data; } + + // Move constructor ClusterVector(ClusterVector &&other) noexcept : m_cluster_size_x(other.m_cluster_size_x), m_cluster_size_y(other.m_cluster_size_y), m_data(other.m_data), - m_size(other.m_size), m_capacity(other.m_capacity) { + m_size(other.m_size), m_capacity(other.m_capacity), + m_frame_number(other.m_frame_number) { other.m_data = nullptr; other.m_size = 0; other.m_capacity = 0; } - //Move assignment operator - ClusterVector& operator=(ClusterVector &&other) noexcept { + // Move assignment operator + ClusterVector &operator=(ClusterVector &&other) noexcept { if (this != &other) { delete[] m_data; m_cluster_size_x = other.m_cluster_size_x; @@ -69,9 +77,11 @@ template class ClusterVector { m_data = other.m_data; m_size = other.m_size; m_capacity = 
other.m_capacity; + m_frame_number = other.m_frame_number; other.m_data = nullptr; other.m_size = 0; other.m_capacity = 0; + other.m_frame_number = 0; } return *this; } @@ -79,7 +89,8 @@ template class ClusterVector { /** * @brief Reserve space for at least capacity clusters * @param capacity number of clusters to reserve space for - * @note If capacity is less than the current capacity, the function does nothing. + * @note If capacity is less than the current capacity, the function does + * nothing. */ void reserve(size_t capacity) { if (capacity > m_capacity) { @@ -92,7 +103,8 @@ template class ClusterVector { * @param x x-coordinate of the cluster * @param y y-coordinate of the cluster * @param data pointer to the data of the cluster - * @warning The data pointer must point to a buffer of size cluster_size_x * cluster_size_y * sizeof(T) + * @warning The data pointer must point to a buffer of size cluster_size_x * + * cluster_size_y * sizeof(T) */ void push_back(CoordType x, CoordType y, const std::byte *data) { if (m_size == m_capacity) { @@ -108,7 +120,15 @@ template class ClusterVector { ptr); m_size++; } - + ClusterVector &operator+=(const ClusterVector &other) { + if (m_size + other.m_size > m_capacity) { + allocate_buffer(m_capacity + other.m_size); + } + std::copy(other.m_data, other.m_data + other.m_size * item_size(), + m_data + m_size * item_size()); + m_size += other.m_size; + return *this; + } /** * @brief Sum the pixels in each cluster @@ -116,7 +136,7 @@ template class ClusterVector { */ std::vector sum() { std::vector sums(m_size); - const size_t stride = element_offset(); + const size_t stride = item_size(); const size_t n_pixels = m_cluster_size_x * m_cluster_size_y; std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y @@ -129,26 +149,73 @@ template class ClusterVector { return sums; } + /** + * @brief Return the maximum sum of the 2x2 subclusters in each cluster + * @return std::vector vector of sums for each cluster + * @throws 
std::runtime_error if the cluster size is not 3x3 + * @warning Only 3x3 clusters are supported for the 2x2 sum. + */ + std::vector sum_2x2() { + std::vector sums(m_size); + const size_t stride = item_size(); + + if (m_cluster_size_x != 3 || m_cluster_size_y != 3) { + throw std::runtime_error( + "Only 3x3 clusters are supported for the 2x2 sum."); + } + std::byte *ptr = m_data + 2 * sizeof(CoordType); // skip x and y + + for (size_t i = 0; i < m_size; i++) { + std::array total; + auto T_ptr = reinterpret_cast(ptr); + total[0] = T_ptr[0] + T_ptr[1] + T_ptr[3] + T_ptr[4]; + total[1] = T_ptr[1] + T_ptr[2] + T_ptr[4] + T_ptr[5]; + total[2] = T_ptr[3] + T_ptr[4] + T_ptr[6] + T_ptr[7]; + total[3] = T_ptr[4] + T_ptr[5] + T_ptr[7] + T_ptr[8]; + + sums[i] = *std::max_element(total.begin(), total.end()); + ptr += stride; + } + + return sums; + } + + /** + * @brief Return the number of clusters in the vector + */ size_t size() const { return m_size; } + + /** + * @brief Return the capacity of the buffer in number of clusters. This is + * the number of clusters that can be stored in the current buffer without + * reallocation. 
+ */ size_t capacity() const { return m_capacity; } - + /** - * @brief Return the offset in bytes for a single cluster + * @brief Return the size in bytes of a single cluster */ - size_t element_offset() const { - return 2*sizeof(CoordType) + + size_t item_size() const { + return 2 * sizeof(CoordType) + m_cluster_size_x * m_cluster_size_y * sizeof(T); } + /** * @brief Return the offset in bytes for the i-th cluster */ - size_t element_offset(size_t i) const { return element_offset() * i; } + size_t element_offset(size_t i) const { return item_size() * i; } /** * @brief Return a pointer to the i-th cluster */ std::byte *element_ptr(size_t i) { return m_data + element_offset(i); } - const std::byte * element_ptr(size_t i) const { return m_data + element_offset(i); } + + /** + * @brief Return a pointer to the i-th cluster + */ + const std::byte *element_ptr(size_t i) const { + return m_data + element_offset(i); + } size_t cluster_size_x() const { return m_cluster_size_x; } size_t cluster_size_y() const { return m_cluster_size_y; } @@ -156,21 +223,49 @@ template class ClusterVector { std::byte *data() { return m_data; } std::byte const *data() const { return m_data; } - template - V& at(size_t i) { - return *reinterpret_cast(element_ptr(i)); + /** + * @brief Return a reference to the i-th cluster casted to type V + * @tparam V type of the cluster + */ + template V &at(size_t i) { + return *reinterpret_cast(element_ptr(i)); } const std::string_view fmt_base() const { - //TODO! how do we match on coord_t? + // TODO! how do we match on coord_t? return m_fmt_base; } + /** + * @brief Return the frame number of the clusters. 0 is used to indicate + * that the clusters come from many frames + */ + uint64_t frame_number() const { return m_frame_number; } + + void set_frame_number(uint64_t frame_number) { + m_frame_number = frame_number; + } + + /** + * @brief Resize the vector to contain new_size clusters. 
If new_size is + * greater than the current capacity, a new buffer is allocated. If the size + * is smaller no memory is freed, size is just updated. + * @param new_size new size of the vector + * @warning The additional clusters are not initialized + */ + void resize(size_t new_size) { + // TODO! Should we initialize the new clusters? + if (new_size > m_capacity) { + allocate_buffer(new_size); + } + m_size = new_size; + } + private: void allocate_buffer(size_t new_capacity) { - size_t num_bytes = element_offset() * new_capacity; + size_t num_bytes = item_size() * new_capacity; std::byte *new_data = new std::byte[num_bytes]{}; - std::copy(m_data, m_data + element_offset() * m_size, new_data); + std::copy(m_data, m_data + item_size() * m_size, new_data); delete[] m_data; m_data = new_data; m_capacity = new_capacity; diff --git a/include/aare/File.hpp b/include/aare/File.hpp index 7aa30e1b..1cef8985 100644 --- a/include/aare/File.hpp +++ b/include/aare/File.hpp @@ -36,6 +36,8 @@ class File { File(File &&other) noexcept; File& operator=(File &&other) noexcept; ~File() = default; + + // void close(); //!< close the file Frame read_frame(); //!< read one frame from the file at the current position Frame read_frame(size_t frame_index); //!< read one frame at the position given by frame number diff --git a/include/aare/Fit.hpp b/include/aare/Fit.hpp new file mode 100644 index 00000000..20ef4ef4 --- /dev/null +++ b/include/aare/Fit.hpp @@ -0,0 +1,76 @@ +#pragma once + +#include +#include +#include + +#include "aare/NDArray.hpp" + +namespace aare { + +namespace func { +double gaus(const double x, const double *par); +NDArray gaus(NDView x, NDView par); + +double pol1(const double x, const double *par); +NDArray pol1(NDView x, NDView par); + +} // namespace func + +static constexpr int DEFAULT_NUM_THREADS = 4; + +/** + * @brief Fit a 1D Gaussian to data. 
+ * @param data data to fit + * @param x x values + */ +NDArray fit_gaus(NDView x, NDView y); + + +/** + * @brief Fit a 1D Gaussian to each pixel. Data layout [row, col, values] + * @param x x values + * @param y y vales, layout [row, col, values] + * @param n_threads number of threads to use + */ +NDArray fit_gaus(NDView x, NDView y, int n_threads = DEFAULT_NUM_THREADS); + + +/** + * @brief Fit a 1D Gaussian with error estimates + * @param x x values + * @param y y vales, layout [row, col, values] + * @param y_err error in y, layout [row, col, values] + * @param par_out output parameters + * @param par_err_out output error parameters + */ +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out); + +/** + * @brief Fit a 1D Gaussian to each pixel with error estimates. Data layout [row, col, values] + * @param x x values + * @param y y vales, layout [row, col, values] + * @param y_err error in y, layout [row, col, values] + * @param par_out output parameters, layout [row, col, values] + * @param par_err_out output parameter errors, layout [row, col, values] + * @param n_threads number of threads to use + */ +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, int n_threads = DEFAULT_NUM_THREADS); + + +NDArray fit_pol1(NDView x, NDView y); + +NDArray fit_pol1(NDView x, NDView y, int n_threads = DEFAULT_NUM_THREADS); + +void fit_pol1(NDView x, NDView y, + NDView y_err, NDView par_out, + NDView par_err_out); + +//TODO! 
not sure we need to offer the different version in C++ +void fit_pol1(NDView x, NDView y, + NDView y_err, NDView par_out, + NDView par_err_out, int n_threads = DEFAULT_NUM_THREADS); + +} // namespace aare \ No newline at end of file diff --git a/include/aare/Pedestal.hpp b/include/aare/Pedestal.hpp index bda94f21..102d7304 100644 --- a/include/aare/Pedestal.hpp +++ b/include/aare/Pedestal.hpp @@ -89,6 +89,7 @@ template class Pedestal { m_sum = 0; m_sum2 = 0; m_cur_samples = 0; + m_mean = 0; } @@ -97,6 +98,7 @@ template class Pedestal { m_sum(row, col) = 0; m_sum2(row, col) = 0; m_cur_samples(row, col) = 0; + m_mean(row, col) = 0; } @@ -119,7 +121,7 @@ template class Pedestal { /** * Push but don't update the cached mean. Speeds up the process - * when intitializing the pedestal. + * when initializing the pedestal. * */ template void push_no_update(NDView frame) { @@ -165,8 +167,8 @@ template class Pedestal { m_sum2(row, col) += val * val; m_cur_samples(row, col)++; } else { - m_sum(row, col) += val - m_sum(row, col) / m_cur_samples(row, col); - m_sum2(row, col) += val * val - m_sum2(row, col) / m_cur_samples(row, col); + m_sum(row, col) += val - m_sum(row, col) / m_samples; + m_sum2(row, col) += val * val - m_sum2(row, col) / m_samples; } //Since we just did a push we know that m_cur_samples(row, col) is at least 1 m_mean(row, col) = m_sum(row, col) / m_cur_samples(row, col); diff --git a/include/aare/ProducerConsumerQueue.hpp b/include/aare/ProducerConsumerQueue.hpp new file mode 100644 index 00000000..426b9e2e --- /dev/null +++ b/include/aare/ProducerConsumerQueue.hpp @@ -0,0 +1,203 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// @author Bo Hu (bhu@fb.com) +// @author Jordan DeLong (delong.j@fb.com) + +// Changes made by PSD Detector Group: +// Copied: Line 34 constexpr std::size_t hardware_destructive_interference_size = 128; from folly/lang/Align.h +// Changed extension to .hpp +// Changed namespace to aare + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +constexpr std::size_t hardware_destructive_interference_size = 128; +namespace aare { + +/* + * ProducerConsumerQueue is a one producer and one consumer queue + * without locks. + */ +template struct ProducerConsumerQueue { + typedef T value_type; + + ProducerConsumerQueue(const ProducerConsumerQueue &) = delete; + ProducerConsumerQueue &operator=(const ProducerConsumerQueue &) = delete; + + + ProducerConsumerQueue(ProducerConsumerQueue &&other){ + size_ = other.size_; + records_ = other.records_; + other.records_ = nullptr; + readIndex_ = other.readIndex_.load(std::memory_order_acquire); + writeIndex_ = other.writeIndex_.load(std::memory_order_acquire); + } + ProducerConsumerQueue &operator=(ProducerConsumerQueue &&other){ + size_ = other.size_; + records_ = other.records_; + other.records_ = nullptr; + readIndex_ = other.readIndex_.load(std::memory_order_acquire); + writeIndex_ = other.writeIndex_.load(std::memory_order_acquire); + return *this; + } + + + ProducerConsumerQueue():ProducerConsumerQueue(2){}; + // size must be >= 2. 
+ // + // Also, note that the number of usable slots in the queue at any + // given time is actually (size-1), so if you start with an empty queue, + // isFull() will return true after size-1 insertions. + explicit ProducerConsumerQueue(uint32_t size) + : size_(size), records_(static_cast(std::malloc(sizeof(T) * size))), readIndex_(0), writeIndex_(0) { + assert(size >= 2); + if (!records_) { + throw std::bad_alloc(); + } + } + + ~ProducerConsumerQueue() { + // We need to destruct anything that may still exist in our queue. + // (No real synchronization needed at destructor time: only one + // thread can be doing this.) + if (!std::is_trivially_destructible::value) { + size_t readIndex = readIndex_; + size_t endIndex = writeIndex_; + while (readIndex != endIndex) { + records_[readIndex].~T(); + if (++readIndex == size_) { + readIndex = 0; + } + } + } + + std::free(records_); + } + + template bool write(Args &&...recordArgs) { + auto const currentWrite = writeIndex_.load(std::memory_order_relaxed); + auto nextRecord = currentWrite + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + if (nextRecord != readIndex_.load(std::memory_order_acquire)) { + new (&records_[currentWrite]) T(std::forward(recordArgs)...); + writeIndex_.store(nextRecord, std::memory_order_release); + return true; + } + + // queue is full + return false; + } + + // move (or copy) the value at the front of the queue to given variable + bool read(T &record) { + auto const currentRead = readIndex_.load(std::memory_order_relaxed); + if (currentRead == writeIndex_.load(std::memory_order_acquire)) { + // queue is empty + return false; + } + + auto nextRecord = currentRead + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + record = std::move(records_[currentRead]); + records_[currentRead].~T(); + readIndex_.store(nextRecord, std::memory_order_release); + return true; + } + + // pointer to the value at the front of the queue (for use in-place) or + // nullptr if empty. 
+ T *frontPtr() { + auto const currentRead = readIndex_.load(std::memory_order_relaxed); + if (currentRead == writeIndex_.load(std::memory_order_acquire)) { + // queue is empty + return nullptr; + } + return &records_[currentRead]; + } + + // queue must not be empty + void popFront() { + auto const currentRead = readIndex_.load(std::memory_order_relaxed); + assert(currentRead != writeIndex_.load(std::memory_order_acquire)); + + auto nextRecord = currentRead + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + records_[currentRead].~T(); + readIndex_.store(nextRecord, std::memory_order_release); + } + + bool isEmpty() const { + return readIndex_.load(std::memory_order_acquire) == writeIndex_.load(std::memory_order_acquire); + } + + bool isFull() const { + auto nextRecord = writeIndex_.load(std::memory_order_acquire) + 1; + if (nextRecord == size_) { + nextRecord = 0; + } + if (nextRecord != readIndex_.load(std::memory_order_acquire)) { + return false; + } + // queue is full + return true; + } + + // * If called by consumer, then true size may be more (because producer may + // be adding items concurrently). + // * If called by producer, then true size may be less (because consumer may + // be removing items concurrently). + // * It is undefined to call this from any other thread. + size_t sizeGuess() const { + int ret = writeIndex_.load(std::memory_order_acquire) - readIndex_.load(std::memory_order_acquire); + if (ret < 0) { + ret += size_; + } + return ret; + } + + // maximum number of items in the queue. 
+ size_t capacity() const { return size_ - 1; } + + private: + using AtomicIndex = std::atomic; + + char pad0_[hardware_destructive_interference_size]; + // const uint32_t size_; + uint32_t size_; + // T *const records_; + T* records_; + + alignas(hardware_destructive_interference_size) AtomicIndex readIndex_; + alignas(hardware_destructive_interference_size) AtomicIndex writeIndex_; + + char pad1_[hardware_destructive_interference_size - sizeof(AtomicIndex)]; +}; + +} // namespace aare \ No newline at end of file diff --git a/include/aare/RawFile.hpp b/include/aare/RawFile.hpp index eb044e36..f744ac20 100644 --- a/include/aare/RawFile.hpp +++ b/include/aare/RawFile.hpp @@ -34,15 +34,19 @@ class RawFile : public FileInterface { size_t n_subfile_parts{}; // d0,d1...dn //TODO! move to vector of SubFile instead of pointers std::vector> subfiles; //subfiles[f0,f1...fn][d0,d1...dn] - std::vector positions; - std::vector m_module_pixel_0; + // std::vector positions; + ModuleConfig cfg{0, 0}; RawMasterFile m_master; size_t m_current_frame{}; - size_t m_rows{}; - size_t m_cols{}; + + // std::vector m_module_pixel_0; + // size_t m_rows{}; + // size_t m_cols{}; + + DetectorGeometry m_geometry; public: /** @@ -111,11 +115,12 @@ class RawFile : public FileInterface { */ static DetectorHeader read_header(const std::filesystem::path &fname); - void update_geometry_with_roi(); + // void update_geometry_with_roi(); int find_number_of_subfiles(); void open_subfiles(); void find_geometry(); }; + } // namespace aare \ No newline at end of file diff --git a/include/aare/RawMasterFile.hpp b/include/aare/RawMasterFile.hpp index 42c324e1..beaeb299 100644 --- a/include/aare/RawMasterFile.hpp +++ b/include/aare/RawMasterFile.hpp @@ -62,17 +62,6 @@ class ScanParameters { }; -struct ROI{ - int64_t xmin{}; - int64_t xmax{}; - int64_t ymin{}; - int64_t ymax{}; - - int64_t height() const { return ymax - ymin; } - int64_t width() const { return xmax - xmin; } -}; - - /** * @brief Class for 
parsing a master file either in our .json format or the old * .raw format diff --git a/include/aare/RawSubFile.hpp b/include/aare/RawSubFile.hpp index 4d786704..89c278e3 100644 --- a/include/aare/RawSubFile.hpp +++ b/include/aare/RawSubFile.hpp @@ -66,6 +66,9 @@ class RawSubFile { size_t pixels_per_frame() const { return m_rows * m_cols; } size_t bytes_per_pixel() const { return m_bitdepth / 8; } +private: + template + void read_with_map(std::byte *image_buf); }; diff --git a/include/aare/decode.hpp b/include/aare/decode.hpp new file mode 100644 index 00000000..1c3c4794 --- /dev/null +++ b/include/aare/decode.hpp @@ -0,0 +1,13 @@ +#pragma once + +#include +#include +namespace aare { + + +uint16_t adc_sar_05_decode64to16(uint64_t input); +uint16_t adc_sar_04_decode64to16(uint64_t input); +void adc_sar_05_decode64to16(NDView input, NDView output); +void adc_sar_04_decode64to16(NDView input, NDView output); + +} // namespace aare \ No newline at end of file diff --git a/include/aare/defs.hpp b/include/aare/defs.hpp index 74664102..db1a47b2 100644 --- a/include/aare/defs.hpp +++ b/include/aare/defs.hpp @@ -179,13 +179,42 @@ template struct t_xy { using xy = t_xy; +/** + * @brief Class to hold the geometry of a module. Where pixel 0 is located and the size of the module + */ struct ModuleGeometry{ - int x{}; - int y{}; + int origin_x{}; + int origin_y{}; int height{}; int width{}; + int row_index{}; + int col_index{}; }; +/** + * @brief Class to hold the geometry of a detector. 
Number of modules, their size and where pixel 0 + * for each module is located + */ +struct DetectorGeometry{ + int modules_x{}; + int modules_y{}; + int pixels_x{}; + int pixels_y{}; + int module_gap_row{}; + int module_gap_col{}; + std::vector module_pixel_0; +}; + +struct ROI{ + int64_t xmin{}; + int64_t xmax{}; + int64_t ymin{}; + int64_t ymax{}; + + int64_t height() const { return ymax - ymin; } + int64_t width() const { return xmax - xmin; } + }; + using dynamic_shape = std::vector; diff --git a/include/aare/geo_helpers.hpp b/include/aare/geo_helpers.hpp new file mode 100644 index 00000000..d0d5d1a7 --- /dev/null +++ b/include/aare/geo_helpers.hpp @@ -0,0 +1,16 @@ +#pragma once +#include "aare/defs.hpp" +#include "aare/RawMasterFile.hpp" //ROI refactor away +namespace aare{ + +/** + * @brief Update the detector geometry given a region of interest + * + * @param geo + * @param roi + * @return DetectorGeometry + */ +DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, ROI roi); + + +} // namespace aare \ No newline at end of file diff --git a/include/aare/utils/task.hpp b/include/aare/utils/task.hpp new file mode 100644 index 00000000..a6ee1420 --- /dev/null +++ b/include/aare/utils/task.hpp @@ -0,0 +1,8 @@ + +#include +#include + +namespace aare { +std::vector> split_task(int first, int last, int n_threads); + +} // namespace aare \ No newline at end of file diff --git a/patches/lmfit.patch b/patches/lmfit.patch new file mode 100644 index 00000000..22063bf6 --- /dev/null +++ b/patches/lmfit.patch @@ -0,0 +1,13 @@ +diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt +index 4efb7ed..6533660 100644 +--- a/lib/CMakeLists.txt ++++ b/lib/CMakeLists.txt +@@ -11,7 +11,7 @@ target_compile_definitions(${lib} PRIVATE "LMFIT_EXPORT") # for Windows DLL expo + + target_include_directories(${lib} + PUBLIC +- $ ++ $ + $ + ) + diff --git a/pyproject.toml b/pyproject.toml index b8390030..74e624fa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ 
build-backend = "scikit_build_core.build" [project] name = "aare" -version = "2024.12.16.dev0" +version = "2025.2.12" [tool.scikit-build] cmake.verbose = true diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index 89ad5e70..2aaa222a 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -28,6 +28,7 @@ target_link_libraries(_aare PRIVATE aare_core aare_compiler_flags) set( PYTHON_FILES aare/__init__.py aare/CtbRawFile.py + aare/func.py aare/RawFile.py aare/transform.py aare/ScanParameters.py @@ -43,10 +44,17 @@ set_target_properties(_aare PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/aare ) +set(PYTHON_EXAMPLES + examples/play.py + examples/fits.py +) + -# Copy the examples/scripts to the build directory -configure_file(examples/play.py ${CMAKE_BINARY_DIR}/play.py) +# Copy the python examples to the build directory +foreach(FILE ${PYTHON_EXAMPLES}) + configure_file(${FILE} ${CMAKE_BINARY_DIR}/${FILE} ) +endforeach(FILE ${PYTHON_EXAMPLES}) if(AARE_INSTALL_PYTHONEXT) diff --git a/python/aare/__init__.py b/python/aare/__init__.py index fb34c7a6..f4c19cc9 100644 --- a/python/aare/__init__.py +++ b/python/aare/__init__.py @@ -8,8 +8,16 @@ from ._aare import ClusterFile from ._aare import hitmap +from ._aare import ClusterFinderMT, ClusterCollector, ClusterFileSink, ClusterVector_i + +from ._aare import fit_gaus, fit_pol1 + from .CtbRawFile import CtbRawFile from .RawFile import RawFile from .ScanParameters import ScanParameters -from .utils import random_pixels, random_pixel \ No newline at end of file +from .utils import random_pixels, random_pixel, flat_list + + +#make functions available in the top level API +from .func import * diff --git a/python/aare/func.py b/python/aare/func.py new file mode 100644 index 00000000..ca60cf21 --- /dev/null +++ b/python/aare/func.py @@ -0,0 +1 @@ +from ._aare import gaus, pol1 \ No newline at end of file diff --git a/python/aare/transform.py b/python/aare/transform.py index 414eb276..2f66942e 100644 --- 
a/python/aare/transform.py +++ b/python/aare/transform.py @@ -2,6 +2,14 @@ from . import _aare +class AdcSar04Transform64to16: + def __call__(self, data): + return _aare.adc_sar_04_decode64to16(data) + +class AdcSar05Transform64to16: + def __call__(self, data): + return _aare.adc_sar_05_decode64to16(data) + class Moench05Transform: #Could be moved to C++ without changing the interface def __init__(self): @@ -45,4 +53,6 @@ def __call__(self, data): moench05 = Moench05Transform() moench05_1g = Moench05Transform1g() moench05_old = Moench05TransformOld() -matterhorn02 = Matterhorn02Transform() \ No newline at end of file +matterhorn02 = Matterhorn02Transform() +adc_sar_04_64to16 = AdcSar04Transform64to16() +adc_sar_05_64to16 = AdcSar05Transform64to16() \ No newline at end of file diff --git a/python/aare/utils.py b/python/aare/utils.py index d53f844c..4708921c 100644 --- a/python/aare/utils.py +++ b/python/aare/utils.py @@ -20,4 +20,8 @@ def random_pixel(xmin=0, xmax=512, ymin=0, ymax=1024): Returns: tuple: (row, col) """ - return random_pixels(1, xmin, xmax, ymin, ymax)[0] \ No newline at end of file + return random_pixels(1, xmin, xmax, ymin, ymax)[0] + +def flat_list(xss): + """Flatten a list of lists.""" + return [x for xs in xss for x in xs] \ No newline at end of file diff --git a/python/examples/fits.py b/python/examples/fits.py new file mode 100644 index 00000000..aa3aef62 --- /dev/null +++ b/python/examples/fits.py @@ -0,0 +1,79 @@ +import matplotlib.pyplot as plt +import numpy as np +from aare import fit_gaus, fit_pol1 +from aare import gaus, pol1 + +textpm = f"±" # +textmu = f"μ" # +textsigma = f"σ" # + + + +# ================================= Gauss fit ================================= +# Parameters +mu = np.random.uniform(1, 100) # Mean of Gaussian +sigma = np.random.uniform(4, 20) # Standard deviation +num_points = 10000 # Number of points for smooth distribution +noise_sigma = 100 + +# Generate Gaussian distribution +data = np.random.normal(mu, sigma, 
num_points) + +# Generate errors for each point +errors = np.abs(np.random.normal(0, sigma, num_points)) # Errors with mean 0, std 0.5 + +# Create subplot +fig0, ax0 = plt.subplots(1, 1, num=0, figsize=(12, 8)) + +x = np.histogram(data, bins=30)[1][:-1] + 0.05 +y = np.histogram(data, bins=30)[0] +yerr = errors[:30] + + +# Add the errors as error bars in the step plot +ax0.errorbar(x, y, yerr=yerr, fmt=". ", capsize=5) +ax0.grid() + +par, err = fit_gaus(x, y, yerr) +print(par, err) + +x = np.linspace(x[0], x[-1], 1000) +ax0.plot(x, gaus(x, par), marker="") +ax0.set(xlabel="x", ylabel="Counts", title=f"A0 = {par[0]:0.2f}{textpm}{err[0]:0.2f}\n" + f"{textmu} = {par[1]:0.2f}{textpm}{err[1]:0.2f}\n" + f"{textsigma} = {par[2]:0.2f}{textpm}{err[2]:0.2f}\n" + f"(init: {textmu}: {mu:0.2f}, {textsigma}: {sigma:0.2f})") +fig0.tight_layout() + + + +# ================================= pol1 fit ================================= +# Parameters +n_points = 40 + +# Generate random slope and intercept (origin) +slope = np.random.uniform(-10, 10) # Random slope between 0.5 and 2.0 +intercept = np.random.uniform(-10, 10) # Random intercept between -10 and 10 + +# Generate random x values +x_values = np.random.uniform(-10, 10, n_points) + +# Calculate y values based on the linear function y = mx + b + error +errors = np.abs(np.random.normal(0, np.random.uniform(1, 5), n_points)) +var_points = np.random.normal(0, np.random.uniform(0.1, 2), n_points) +y_values = slope * x_values + intercept + var_points + +fig1, ax1 = plt.subplots(1, 1, num=1, figsize=(12, 8)) +ax1.errorbar(x_values, y_values, yerr=errors, fmt=". 
", capsize=5) +par, err = fit_pol1(x_values, y_values, errors) + + +x = np.linspace(np.min(x_values), np.max(x_values), 1000) +ax1.plot(x, pol1(x, par), marker="") +ax1.set(xlabel="x", ylabel="y", title=f"a = {par[0]:0.2f}{textpm}{err[0]:0.2f}\n" + f"b = {par[1]:0.2f}{textpm}{err[1]:0.2f}\n" + f"(init: {slope:0.2f}, {intercept:0.2f})") +fig1.tight_layout() + +plt.show() + diff --git a/python/examples/play.py b/python/examples/play.py index 986b7180..f1a869ba 100644 --- a/python/examples/play.py +++ b/python/examples/play.py @@ -8,51 +8,61 @@ import boost_histogram as bh import time -from aare import File, ClusterFinder, VarClusterFinder +<<<<<<< HEAD +from aare import File, ClusterFinder, VarClusterFinder, ClusterFile, CtbRawFile +from aare import gaus, fit_gaus -base = Path('/mnt/sls_det_storage/matterhorn_data/aare_test_data/') - -f = File(base/'Moench03new/cu_half_speed_master_4.json') -cf = ClusterFinder((400,400), (3,3)) -for i in range(1000): - cf.push_pedestal_frame(f.read_frame()) - -fig, ax = plt.subplots() -im = ax.imshow(cf.pedestal()) -cf.pedestal() -cf.noise() - - - -N = 500 -t0 = time.perf_counter() -hist1 = bh.Histogram(bh.axis.Regular(40, -2, 4000)) -f.seek(0) +base = Path('/mnt/sls_det_storage/moench_data/Julian/MOENCH05/20250113_first_xrays_redo/raw_files/') +cluster_file = Path('/home/l_msdetect/erik/tmp/Cu.clust') t0 = time.perf_counter() -data = f.read_n(N) +offset= -0.5 +hist3d = bh.Histogram( + bh.axis.Regular(160, 0+offset, 160+offset), #x + bh.axis.Regular(150, 0+offset, 150+offset), #y + bh.axis.Regular(200, 0, 6000), #ADU +) + +total_clusters = 0 +with ClusterFile(cluster_file, chunk_size = 1000) as f: + for i, clusters in enumerate(f): + arr = np.array(clusters) + total_clusters += clusters.size + hist3d.fill(arr['y'],arr['x'], clusters.sum_2x2()) #python talks [row, col] cluster finder [x,y] +======= +from aare import RawFile + +f = 
RawFile('/mnt/sls_det_storage/jungfrau_data1/vadym_tests/jf12_M431/laser_scan/laserScan_pedestal_G0_master_0.json') + +print(f'{f.frame_number(1)}') + +for i in range(10): + header, img = f.read_frame() + print(header['frameNumber'], img.shape) +>>>>>>> developer + + t_elapsed = time.perf_counter()-t0 +print(f'Histogram filling took: {t_elapsed:.3f}s {total_clusters/t_elapsed/1e6:.3f}M clusters/s') +histogram_data = hist3d.counts() +x = hist3d.axes[2].edges[:-1] -n_bytes = data.itemsize*data.size - -print(f'Reading {N} frames took {t_elapsed:.3f}s {N/t_elapsed:.0f} FPS, {n_bytes/1024**2:.4f} GB/s') - - -for frame in data: - a = cf.find_clusters(frame) - -clusters = cf.steal_clusters() - -# t_elapsed = time.perf_counter()-t0 -# print(f'Clustering {N} frames took {t_elapsed:.2f}s {N/t_elapsed:.0f} FPS') +y = histogram_data[100,100,:] +xx = np.linspace(x[0], x[-1]) +# fig, ax = plt.subplots() +# ax.step(x, y, where = 'post') +y_err = np.sqrt(y) +y_err = np.zeros(y.size) +y_err += 1 -# t0 = time.perf_counter() -# total_clusters = clusters.size +# par = fit_gaus2(y,x, y_err) +# ax.plot(xx, gaus(xx,par)) +# print(par) -# hist1.fill(clusters.sum()) +res = fit_gaus(y,x) +res2 = fit_gaus(y,x, y_err) +print(res) +print(res2) -# t_elapsed = time.perf_counter()-t0 -# print(f'Filling histogram with the sum of {total_clusters} clusters took: {t_elapsed:.3f}s, {total_clusters/t_elapsed:.3g} clust/s') -# print(f'Average number of clusters per frame {total_clusters/N:.3f}') \ No newline at end of file diff --git a/python/src/cluster.hpp b/python/src/cluster.hpp index d11c706f..792b7e6d 100644 --- a/python/src/cluster.hpp +++ b/python/src/cluster.hpp @@ -1,4 +1,7 @@ +#include "aare/ClusterCollector.hpp" +#include "aare/ClusterFileSink.hpp" #include "aare/ClusterFinder.hpp" +#include "aare/ClusterFinderMT.hpp" #include "aare/ClusterVector.hpp" #include "aare/NDView.hpp" #include "aare/Pedestal.hpp" @@ -8,9 +11,10 @@ #include #include #include +#include namespace py = pybind11; -using 
pd_type = float; +using pd_type = double; template void define_cluster_vector(py::module &m, const std::string &typestr) { @@ -18,93 +22,166 @@ void define_cluster_vector(py::module &m, const std::string &typestr) { py::class_>(m, class_name.c_str(), py::buffer_protocol()) .def(py::init()) .def_property_readonly("size", &ClusterVector::size) - .def("element_offset", - py::overload_cast<>(&ClusterVector::element_offset, py::const_)) + .def("item_size", &ClusterVector::item_size) .def_property_readonly("fmt", - [typestr](ClusterVector &self) { - return fmt::format( - self.fmt_base(), self.cluster_size_x(), - self.cluster_size_y(), typestr); + [typestr](ClusterVector &self) { + return fmt::format( + self.fmt_base(), self.cluster_size_x(), + self.cluster_size_y(), typestr); + }) + .def("sum", + [](ClusterVector &self) { + auto *vec = new std::vector(self.sum()); + return return_vector(vec); }) - .def("sum", [](ClusterVector &self) { - auto *vec = new std::vector(self.sum()); + .def("sum_2x2", [](ClusterVector &self) { + auto *vec = new std::vector(self.sum_2x2()); return return_vector(vec); }) + .def_property_readonly("capacity", &ClusterVector::capacity) + .def_property("frame_number", &ClusterVector::frame_number, + &ClusterVector::set_frame_number) .def_buffer([typestr](ClusterVector &self) -> py::buffer_info { return py::buffer_info( - self.data(), /* Pointer to buffer */ - self.element_offset(), /* Size of one scalar */ + self.data(), /* Pointer to buffer */ + self.item_size(), /* Size of one scalar */ fmt::format(self.fmt_base(), self.cluster_size_x(), self.cluster_size_y(), typestr), /* Format descriptor */ 1, /* Number of dimensions */ - {self.size()}, /* Buffer dimensions */ - {self.element_offset()} /* Strides (in bytes) for each index */ + {self.size()}, /* Buffer dimensions */ + {self.item_size()} /* Strides (in bytes) for each index */ ); }); } -void define_cluster_finder_bindings(py::module &m) { - py::class_>(m, "ClusterFinder") - .def(py::init, 
Shape<2>, pd_type, size_t>(), py::arg("image_size"), - py::arg("cluster_size"), py::arg("n_sigma") = 5.0, - py::arg("capacity") = 1'000'000) +void define_cluster_finder_mt_bindings(py::module &m) { + py::class_>(m, "ClusterFinderMT") + .def(py::init, Shape<2>, pd_type, size_t, size_t>(), + py::arg("image_size"), py::arg("cluster_size"), + py::arg("n_sigma") = 5.0, py::arg("capacity") = 2048, + py::arg("n_threads") = 3) .def("push_pedestal_frame", - [](ClusterFinder &self, + [](ClusterFinderMT &self, py::array_t frame) { auto view = make_view_2d(frame); self.push_pedestal_frame(view); }) + .def( + "find_clusters", + [](ClusterFinderMT &self, + py::array_t frame, uint64_t frame_number) { + auto view = make_view_2d(frame); + self.find_clusters(view, frame_number); + return; + }, + py::arg(), py::arg("frame_number") = 0) + .def("clear_pedestal", &ClusterFinderMT::clear_pedestal) + .def("sync", &ClusterFinderMT::sync) + .def("stop", &ClusterFinderMT::stop) + .def("start", &ClusterFinderMT::start) .def("pedestal", - [](ClusterFinder &self) { + [](ClusterFinderMT &self, size_t thread_index) { auto pd = new NDArray{}; - *pd = self.pedestal(); + *pd = self.pedestal(thread_index); return return_image_data(pd); - }) + },py::arg("thread_index") = 0) .def("noise", - [](ClusterFinder &self) { + [](ClusterFinderMT &self, size_t thread_index) { auto arr = new NDArray{}; - *arr = self.noise(); + *arr = self.noise(thread_index); return return_image_data(arr); - }) - .def("steal_clusters", - [](ClusterFinder &self, bool realloc_same_capacity) { - auto v = new ClusterVector(self.steal_clusters(realloc_same_capacity)); - return v; - }, py::arg("realloc_same_capacity") = false) - .def("find_clusters", + },py::arg("thread_index") = 0); +} + +void define_cluster_collector_bindings(py::module &m) { + py::class_(m, "ClusterCollector") + .def(py::init *>()) + .def("stop", &ClusterCollector::stop) + .def( + "steal_clusters", + [](ClusterCollector &self) { + auto v = + new 
std::vector>(self.steal_clusters()); + return v; + }, + py::return_value_policy::take_ownership); +} + +void define_cluster_file_sink_bindings(py::module &m) { + py::class_(m, "ClusterFileSink") + .def(py::init *, + const std::filesystem::path &>()) + .def("stop", &ClusterFileSink::stop); +} + +void define_cluster_finder_bindings(py::module &m) { + py::class_>(m, "ClusterFinder") + .def(py::init, Shape<2>, pd_type, size_t>(), + py::arg("image_size"), py::arg("cluster_size"), + py::arg("n_sigma") = 5.0, py::arg("capacity") = 1'000'000) + .def("push_pedestal_frame", [](ClusterFinder &self, py::array_t frame) { auto view = make_view_2d(frame); - self.find_clusters(view); - return; - }); + self.push_pedestal_frame(view); + }) + .def("clear_pedestal", &ClusterFinder::clear_pedestal) + .def_property_readonly("pedestal", + [](ClusterFinder &self) { + auto pd = new NDArray{}; + *pd = self.pedestal(); + return return_image_data(pd); + }) + .def_property_readonly("noise", + [](ClusterFinder &self) { + auto arr = new NDArray{}; + *arr = self.noise(); + return return_image_data(arr); + }) + .def( + "steal_clusters", + [](ClusterFinder &self, + bool realloc_same_capacity) { + auto v = new ClusterVector( + self.steal_clusters(realloc_same_capacity)); + return v; + }, + py::arg("realloc_same_capacity") = false) + .def( + "find_clusters", + [](ClusterFinder &self, + py::array_t frame, uint64_t frame_number) { + auto view = make_view_2d(frame); + self.find_clusters(view, frame_number); + return; + }, + py::arg(), py::arg("frame_number") = 0); - m.def("hitmap", [](std::array image_size, ClusterVector& cv){ - - py::array_t hitmap(image_size); - auto r = hitmap.mutable_unchecked<2>(); + m.def("hitmap", + [](std::array image_size, ClusterVector &cv) { + py::array_t hitmap(image_size); + auto r = hitmap.mutable_unchecked<2>(); - // Initialize hitmap to 0 - for (py::ssize_t i = 0; i < r.shape(0); i++) - for (py::ssize_t j = 0; j < r.shape(1); j++) - r(i, j) = 0; + // Initialize hitmap to 
0 + for (py::ssize_t i = 0; i < r.shape(0); i++) + for (py::ssize_t j = 0; j < r.shape(1); j++) + r(i, j) = 0; - size_t stride = cv.element_offset(); - auto ptr = cv.data(); - for(size_t i=0; i(ptr); - auto y = *reinterpret_cast(ptr+sizeof(int16_t)); - r(y, x) += 1; - ptr += stride; - } - return hitmap; - }); + size_t stride = cv.item_size(); + auto ptr = cv.data(); + for (size_t i = 0; i < cv.size(); i++) { + auto x = *reinterpret_cast(ptr); + auto y = *reinterpret_cast(ptr + sizeof(int16_t)); + r(y, x) += 1; + ptr += stride; + } + return hitmap; + }); define_cluster_vector(m, "i"); define_cluster_vector(m, "d"); define_cluster_vector(m, "f"); - py::class_(m, "DynamicCluster", py::buffer_protocol()) .def(py::init()) .def("size", &DynamicCluster::size) diff --git a/python/src/cluster_file.hpp b/python/src/cluster_file.hpp index 82870c46..8a431b55 100644 --- a/python/src/cluster_file.hpp +++ b/python/src/cluster_file.hpp @@ -28,27 +28,24 @@ void define_cluster_file_io_bindings(py::module &m) { py::arg(), py::arg("chunk_size") = 1000, py::arg("mode") = "r") .def("read_clusters", [](ClusterFile &self, size_t n_clusters) { - auto *vec = - new std::vector(self.read_clusters(n_clusters)); - return return_vector(vec); - }) + auto v = new ClusterVector(self.read_clusters(n_clusters)); + return v; + },py::return_value_policy::take_ownership) .def("read_frame", [](ClusterFile &self) { - int32_t frame_number; - auto *vec = - new std::vector(self.read_frame(frame_number)); - return py::make_tuple(frame_number, return_vector(vec)); + auto v = new ClusterVector(self.read_frame()); + return v; }) .def("write_frame", &ClusterFile::write_frame) - .def("read_cluster_with_cut", - [](ClusterFile &self, size_t n_clusters, - py::array_t noise_map, int nx, int ny) { - auto view = make_view_2d(noise_map); - auto *vec = - new std::vector(self.read_cluster_with_cut( - n_clusters, view.data(), nx, ny)); - return return_vector(vec); - }) + // .def("read_cluster_with_cut", + // [](ClusterFile 
&self, size_t n_clusters, + // py::array_t noise_map, int nx, int ny) { + // auto view = make_view_2d(noise_map); + // auto *vec = + // new std::vector(self.read_cluster_with_cut( + // n_clusters, view.data(), nx, ny)); + // return return_vector(vec); + // }) .def("__enter__", [](ClusterFile &self) { return &self; }) .def("__exit__", [](ClusterFile &self, @@ -59,12 +56,11 @@ void define_cluster_file_io_bindings(py::module &m) { }) .def("__iter__", [](ClusterFile &self) { return &self; }) .def("__next__", [](ClusterFile &self) { - auto vec = - new std::vector(self.read_clusters(self.chunk_size())); - if (vec->size() == 0) { + auto v = new ClusterVector(self.read_clusters(self.chunk_size())); + if (v->size() == 0) { throw py::stop_iteration(); } - return return_vector(vec); + return v; }); m.def("calculate_eta2", []( aare::ClusterVector &clusters) { diff --git a/python/src/ctb_raw_file.hpp b/python/src/ctb_raw_file.hpp index 39c10016..9ce656dd 100644 --- a/python/src/ctb_raw_file.hpp +++ b/python/src/ctb_raw_file.hpp @@ -7,6 +7,7 @@ #include "aare/RawSubFile.hpp" #include "aare/defs.hpp" +#include "aare/decode.hpp" // #include "aare/fClusterFileV2.hpp" #include @@ -23,6 +24,47 @@ using namespace ::aare; void define_ctb_raw_file_io_bindings(py::module &m) { +m.def("adc_sar_05_decode64to16", [](py::array_t input) { + + + if(input.ndim() != 2){ + throw std::runtime_error("Only 2D arrays are supported at this moment"); + } + + //Create a 2D output array with the same shape as the input + std::vector shape{input.shape(0), input.shape(1)/8}; + py::array_t output(shape); + + //Create a view of the input and output arrays + NDView input_view(reinterpret_cast(input.mutable_data()), {output.shape(0), output.shape(1)}); + NDView output_view(output.mutable_data(), {output.shape(0), output.shape(1)}); + + adc_sar_05_decode64to16(input_view, output_view); + + return output; +}); + + +m.def("adc_sar_04_decode64to16", [](py::array_t input) { + + + if(input.ndim() != 2){ + throw 
std::runtime_error("Only 2D arrays are supported at this moment"); + } + + //Create a 2D output array with the same shape as the input + std::vector shape{input.shape(0), input.shape(1)/8}; + py::array_t output(shape); + + //Create a view of the input and output arrays + NDView input_view(reinterpret_cast(input.mutable_data()), {output.shape(0), output.shape(1)}); + NDView output_view(output.mutable_data(), {output.shape(0), output.shape(1)}); + + adc_sar_04_decode64to16(input_view, output_view); + + return output; +}); + py::class_(m, "CtbRawFile") .def(py::init()) .def("read_frame", diff --git a/python/src/file.hpp b/python/src/file.hpp index 30fa82f3..c3c800cc 100644 --- a/python/src/file.hpp +++ b/python/src/file.hpp @@ -20,6 +20,11 @@ namespace py = pybind11; using namespace ::aare; +//Disable warnings for unused parameters, as we ignore some +//in the __exit__ method +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-parameter" + void define_file_io_bindings(py::module &m) { @@ -124,8 +129,41 @@ void define_file_io_bindings(py::module &m) { self.read_into(reinterpret_cast(image.mutable_data()), n_frames); return image; + }) + .def("__enter__", [](File &self) { return &self; }) + .def("__exit__", + [](File &self, + const std::optional &exc_type, + const std::optional &exc_value, + const std::optional &traceback) { + // self.close(); + }) + .def("__iter__", [](File &self) { return &self; }) + .def("__next__", [](File &self) { + + try{ + const uint8_t item_size = self.bytes_per_pixel(); + py::array image; + std::vector shape; + shape.reserve(2); + shape.push_back(self.rows()); + shape.push_back(self.cols()); + if (item_size == 1) { + image = py::array_t(shape); + } else if (item_size == 2) { + image = py::array_t(shape); + } else if (item_size == 4) { + image = py::array_t(shape); + } + self.read_into( + reinterpret_cast(image.mutable_data())); + return image; + }catch(std::runtime_error &e){ + throw py::stop_iteration(); + } }); + 
py::class_(m, "FileConfig") .def(py::init<>()) .def_readwrite("rows", &FileConfig::rows) @@ -205,7 +243,7 @@ void define_file_io_bindings(py::module &m) { return image; }); - +#pragma GCC diagnostic pop // py::class_(m, "ClusterHeader") // .def(py::init<>()) // .def_readwrite("frame_number", &ClusterHeader::frame_number) diff --git a/python/src/fit.hpp b/python/src/fit.hpp new file mode 100644 index 00000000..60cdeccc --- /dev/null +++ b/python/src/fit.hpp @@ -0,0 +1,223 @@ +#include +#include +#include +#include +#include + +#include "aare/Fit.hpp" + +namespace py = pybind11; + +void define_fit_bindings(py::module &m) { + + // TODO! Evaluate without converting to double + m.def( + "gaus", + [](py::array_t x, + py::array_t par) { + auto x_view = make_view_1d(x); + auto par_view = make_view_1d(par); + auto y = new NDArray{aare::func::gaus(x_view, par_view)}; + return return_image_data(y); + }, + R"( + Evaluate a 1D Gaussian function for all points in x using parameters par. + + Parameters + ---------- + x : array_like + The points at which to evaluate the Gaussian function. + par : array_like + The parameters of the Gaussian function. The first element is the amplitude, the second element is the mean, and the third element is the standard deviation. + )", py::arg("x"), py::arg("par")); + + m.def( + "pol1", + [](py::array_t x, + py::array_t par) { + auto x_view = make_view_1d(x); + auto par_view = make_view_1d(par); + auto y = new NDArray{aare::func::pol1(x_view, par_view)}; + return return_image_data(y); + }, + R"( + Evaluate a 1D polynomial function for all points in x using parameters par. (p0+p1*x) + + Parameters + ---------- + x : array_like + The points at which to evaluate the polynomial function. + par : array_like + The parameters of the polynomial function. The first element is the intercept, and the second element is the slope. 
+ )", py::arg("x"), py::arg("par")); + + m.def( + "fit_gaus", + [](py::array_t x, + py::array_t y, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray{}; + auto y_view = make_view_3d(y); + auto x_view = make_view_1d(x); + *par = aare::fit_gaus(x_view, y_view, n_threads); + return return_image_data(par); + } else if (y.ndim() == 1) { + auto par = new NDArray{}; + auto y_view = make_view_1d(y); + auto x_view = make_view_1d(x); + *par = aare::fit_gaus(x_view, y_view); + return return_image_data(par); + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, +R"( +Fit a 1D Gaussian to data. + +Parameters +---------- +x : array_like + The x values. +y : array_like + The y values. +n_threads : int, optional + The number of threads to use. Default is 4. +)", + py::arg("x"), py::arg("y"), py::arg("n_threads") = 4); + + m.def( + "fit_gaus", + [](py::array_t x, + py::array_t y, + py::array_t + y_err, int n_threads) { + if (y.ndim() == 3) { + // Allocate memory for the output + // Need to have pointers to allow python to manage + // the memory + auto par = new NDArray({y.shape(0), y.shape(1), 3}); + auto par_err = + new NDArray({y.shape(0), y.shape(1), 3}); + + auto y_view = make_view_3d(y); + auto y_view_err = make_view_3d(y_err); + auto x_view = make_view_1d(x); + aare::fit_gaus(x_view, y_view, y_view_err, par->view(), + par_err->view(), n_threads); + // return return_image_data(par); + return py::make_tuple(return_image_data(par), + return_image_data(par_err)); + } else if (y.ndim() == 1) { + // Allocate memory for the output + // Need to have pointers to allow python to manage + // the memory + auto par = new NDArray({3}); + auto par_err = new NDArray({3}); + + // Decode the numpy arrays + auto y_view = make_view_1d(y); + auto y_view_err = make_view_1d(y_err); + auto x_view = make_view_1d(x); + + aare::fit_gaus(x_view, y_view, y_view_err, par->view(), + par_err->view()); + return py::make_tuple(return_image_data(par), + 
return_image_data(par_err)); + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, +R"( +Fit a 1D Gaussian to data with error estimates. + +Parameters +---------- +x : array_like + The x values. +y : array_like + The y values. +y_err : array_like + The error in the y values. +n_threads : int, optional + The number of threads to use. Default is 4. +)", + py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4); + + m.def( + "fit_pol1", + [](py::array_t x, + py::array_t y, + int n_threads) { + if (y.ndim() == 3) { + auto par = new NDArray{}; + + auto x_view = make_view_1d(x); + auto y_view = make_view_3d(y); + *par = aare::fit_pol1(x_view, y_view, n_threads); + return return_image_data(par); + } else if (y.ndim() == 1) { + auto par = new NDArray{}; + auto x_view = make_view_1d(x); + auto y_view = make_view_1d(y); + *par = aare::fit_pol1(x_view, y_view); + return return_image_data(par); + } else { + throw std::runtime_error("Data must be 1D or 3D"); + } + }, + py::arg("x"), py::arg("y"), py::arg("n_threads") = 4); + + m.def( + "fit_pol1", + [](py::array_t x, + py::array_t y, + py::array_t + y_err, int n_threads) { + if (y.ndim() == 3) { + auto par = + new NDArray({y.shape(0), y.shape(1), 2}); + auto par_err = + new NDArray({y.shape(0), y.shape(1), 2}); + + auto y_view = make_view_3d(y); + auto y_view_err = make_view_3d(y_err); + auto x_view = make_view_1d(x); + + aare::fit_pol1(x_view, y_view,y_view_err, par->view(), + par_err->view(), n_threads); + return py::make_tuple(return_image_data(par), + return_image_data(par_err)); + + } else if (y.ndim() == 1) { + auto par = new NDArray({2}); + auto par_err = new NDArray({2}); + + auto y_view = make_view_1d(y); + auto y_view_err = make_view_1d(y_err); + auto x_view = make_view_1d(x); + + aare::fit_pol1(x_view, y_view, y_view_err, par->view(), + par_err->view()); + return py::make_tuple(return_image_data(par), + return_image_data(par_err)); + } else { + throw std::runtime_error("Data must be 
1D or 3D"); + } + }, +R"( +Fit a 1D polynomial to data with error estimates. + +Parameters +---------- +x : array_like + The x values. +y : array_like + The y values. +y_err : array_like + The error in the y values. +n_threads : int, optional + The number of threads to use. Default is 4. +)", + py::arg("x"), py::arg("y"), py::arg("y_err"), py::arg("n_threads") = 4); +} \ No newline at end of file diff --git a/python/src/module.cpp b/python/src/module.cpp index 14a686a1..70d143f8 100644 --- a/python/src/module.cpp +++ b/python/src/module.cpp @@ -8,6 +8,7 @@ #include "pedestal.hpp" #include "cluster.hpp" #include "cluster_file.hpp" +#include "fit.hpp" //Pybind stuff #include @@ -25,5 +26,10 @@ PYBIND11_MODULE(_aare, m) { define_pedestal_bindings(m, "Pedestal_d"); define_pedestal_bindings(m, "Pedestal_f"); define_cluster_finder_bindings(m); + define_cluster_finder_mt_bindings(m); define_cluster_file_io_bindings(m); + define_cluster_collector_bindings(m); + define_cluster_file_sink_bindings(m); + define_fit_bindings(m); + } \ No newline at end of file diff --git a/python/src/np_helper.hpp b/python/src/np_helper.hpp index e0c145bf..6e928300 100644 --- a/python/src/np_helper.hpp +++ b/python/src/np_helper.hpp @@ -39,65 +39,6 @@ template py::array return_vector(std::vector *vec) { free_when_done); // numpy array references this parent } -// template py::array do_read(Reader &r, size_t n_frames) { -// py::array image; -// if (n_frames == 0) -// n_frames = r.total_frames(); - -// std::array shape{static_cast(n_frames), r.rows(), -// r.cols()}; -// const uint8_t item_size = r.bytes_per_pixel(); -// if (item_size == 1) { -// image = py::array_t( -// shape); -// } else if (item_size == 2) { -// image = -// py::array_t( -// shape); -// } else if (item_size == 4) { -// image = -// py::array_t( -// shape); -// } -// r.read_into(reinterpret_cast(image.mutable_data()), n_frames); -// return image; -// } - -// py::array return_frame(pl::Frame *ptr) { -// py::capsule 
free_when_done(ptr, [](void *f) { -// pl::Frame *foo = reinterpret_cast(f); -// delete foo; -// }); - -// const uint8_t item_size = ptr->bytes_per_pixel(); -// std::vector shape; -// for (auto val : ptr->shape()) -// if (val > 1) -// shape.push_back(val); - -// std::vector strides; -// if (shape.size() == 1) -// strides.push_back(item_size); -// else if (shape.size() == 2) { -// strides.push_back(item_size * shape[1]); -// strides.push_back(item_size); -// } - -// if (item_size == 1) -// return py::array_t( -// shape, strides, -// reinterpret_cast(ptr->data()), free_when_done); -// else if (item_size == 2) -// return py::array_t(shape, strides, -// reinterpret_cast(ptr->data()), -// free_when_done); -// else if (item_size == 4) -// return py::array_t(shape, strides, -// reinterpret_cast(ptr->data()), -// free_when_done); -// return {}; -// } - // todo rewrite generic template auto get_shape_3d(py::array_t arr) { return aare::Shape<3>{arr.shape(0), arr.shape(1), arr.shape(2)}; @@ -111,6 +52,13 @@ template auto get_shape_2d(py::array_t arr) { return aare::Shape<2>{arr.shape(0), arr.shape(1)}; } +template auto get_shape_1d(py::array_t arr) { + return aare::Shape<1>{arr.shape(0)}; +} + template auto make_view_2d(py::array_t arr) { return aare::NDView(arr.mutable_data(), get_shape_2d(arr)); +} +template auto make_view_1d(py::array_t arr) { + return aare::NDView(arr.mutable_data(), get_shape_1d(arr)); } \ No newline at end of file diff --git a/src/ClusterFile.cpp b/src/ClusterFile.cpp index 855e0e71..2928d26c 100644 --- a/src/ClusterFile.cpp +++ b/src/ClusterFile.cpp @@ -20,6 +20,12 @@ ClusterFile::ClusterFile(const std::filesystem::path &fname, size_t chunk_size, throw std::runtime_error("Could not open file for writing: " + fname.string()); } + } else if (mode == "a") { + fp = fopen(fname.c_str(), "ab"); + if (!fp) { + throw std::runtime_error("Could not open file for appending: " + + fname.string()); + } } else { throw std::runtime_error("Unsupported mode: " + mode); 
} @@ -34,35 +40,35 @@ void ClusterFile::close() { } } -void ClusterFile::write_frame(int32_t frame_number, - const ClusterVector &clusters) { - if (m_mode != "w") { +void ClusterFile::write_frame(const ClusterVector &clusters) { + if (m_mode != "w" && m_mode != "a") { throw std::runtime_error("File not opened for writing"); } if (!(clusters.cluster_size_x() == 3) && !(clusters.cluster_size_y() == 3)) { throw std::runtime_error("Only 3x3 clusters are supported"); } + int32_t frame_number = clusters.frame_number(); fwrite(&frame_number, sizeof(frame_number), 1, fp); uint32_t n_clusters = clusters.size(); fwrite(&n_clusters, sizeof(n_clusters), 1, fp); - fwrite(clusters.data(), clusters.element_offset(), clusters.size(), fp); - // write clusters - // fwrite(clusters.data(), sizeof(Cluster), clusters.size(), fp); + fwrite(clusters.data(), clusters.item_size(), clusters.size(), fp); } -std::vector ClusterFile::read_clusters(size_t n_clusters) { +ClusterVector ClusterFile::read_clusters(size_t n_clusters) { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } - std::vector clusters(n_clusters); + + ClusterVector clusters(3,3, n_clusters); int32_t iframe = 0; // frame number needs to be 4 bytes! 
size_t nph_read = 0; uint32_t nn = m_num_left; uint32_t nph = m_num_left; // number of clusters in frame needs to be 4 - auto buf = reinterpret_cast(clusters.data()); + // auto buf = reinterpret_cast(clusters.data()); + auto buf = clusters.data(); // if there are photons left from previous frame read them first if (nph) { if (nph > n_clusters) { @@ -72,8 +78,8 @@ std::vector ClusterFile::read_clusters(size_t n_clusters) { } else { nn = nph; } - nph_read += fread(reinterpret_cast(buf + nph_read), - sizeof(Cluster3x3), nn, fp); + nph_read += fread((buf + nph_read*clusters.item_size()), + clusters.item_size(), nn, fp); m_num_left = nph - nn; // write back the number of photons left } @@ -87,8 +93,8 @@ std::vector ClusterFile::read_clusters(size_t n_clusters) { else nn = nph; - nph_read += fread(reinterpret_cast(buf + nph_read), - sizeof(Cluster3x3), nn, fp); + nph_read += fread((buf + nph_read*clusters.item_size()), + clusters.item_size(), nn, fp); m_num_left = nph - nn; } if (nph_read >= n_clusters) @@ -102,7 +108,7 @@ std::vector ClusterFile::read_clusters(size_t n_clusters) { return clusters; } -std::vector ClusterFile::read_frame(int32_t &out_fnum) { +ClusterVector ClusterFile::read_frame() { if (m_mode != "r") { throw std::runtime_error("File not opened for reading"); } @@ -110,8 +116,8 @@ std::vector ClusterFile::read_frame(int32_t &out_fnum) { throw std::runtime_error( "There are still photons left in the last frame"); } - - if (fread(&out_fnum, sizeof(out_fnum), 1, fp) != 1) { + int32_t frame_number; + if (fread(&frame_number, sizeof(frame_number), 1, fp) != 1) { throw std::runtime_error("Could not read frame number"); } @@ -119,158 +125,163 @@ std::vector ClusterFile::read_frame(int32_t &out_fnum) { if (fread(&n_clusters, sizeof(n_clusters), 1, fp) != 1) { throw std::runtime_error("Could not read number of clusters"); } - std::vector clusters(n_clusters); + // std::vector clusters(n_clusters); + ClusterVector clusters(3, 3, n_clusters); + 
clusters.set_frame_number(frame_number); - if (fread(clusters.data(), sizeof(Cluster3x3), n_clusters, fp) != + if (fread(clusters.data(), clusters.item_size(), n_clusters, fp) != static_cast(n_clusters)) { throw std::runtime_error("Could not read clusters"); } + clusters.resize(n_clusters); return clusters; } -std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, - double *noise_map, - int nx, int ny) { - if (m_mode != "r") { - throw std::runtime_error("File not opened for reading"); - } - std::vector clusters(n_clusters); - // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster *buf, - // uint32_t *n_left, double *noise_map, int - // nx, int ny) { - int iframe = 0; - // uint32_t nph = *n_left; - uint32_t nph = m_num_left; - // uint32_t nn = *n_left; - uint32_t nn = m_num_left; - size_t nph_read = 0; - - int32_t t2max, tot1; - int32_t tot3; - // Cluster *ptr = buf; - Cluster3x3 *ptr = clusters.data(); - int good = 1; - double noise; - // read photons left from previous frame - if (noise_map) - printf("Using noise map\n"); - - if (nph) { - if (nph > n_clusters) { - // if we have more photons left in the frame then photons to - // read we read directly the requested number - nn = n_clusters; - } else { - nn = nph; - } - for (size_t iph = 0; iph < nn; iph++) { - // read photons 1 by 1 - size_t n_read = - fread(reinterpret_cast(ptr), sizeof(Cluster3x3), 1, fp); - if (n_read != 1) { - clusters.resize(nph_read); - return clusters; - } - // TODO! 
error handling on read - good = 1; - if (noise_map) { - if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && ptr->y < ny) { - tot1 = ptr->data[4]; - analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, NULL, NULL, - NULL); - noise = noise_map[ptr->y * nx + ptr->x]; - if (tot1 > noise || t2max > 2 * noise || tot3 > 3 * noise) { - ; - } else { - good = 0; - printf("%d %d %f %d %d %d\n", ptr->x, ptr->y, noise, - tot1, t2max, tot3); - } - } else { - printf("Bad pixel number %d %d\n", ptr->x, ptr->y); - good = 0; - } - } - if (good) { - ptr++; - nph_read++; - } - (m_num_left)--; - if (nph_read >= n_clusters) - break; - } - } - if (nph_read < n_clusters) { - // // keep on reading frames and photons until reaching - // n_clusters - while (fread(&iframe, sizeof(iframe), 1, fp)) { - // // printf("%d\n",nph_read); - if (fread(&nph, sizeof(nph), 1, fp)) { - // // printf("** %d\n",nph); - m_num_left = nph; - for (size_t iph = 0; iph < nph; iph++) { - // // read photons 1 by 1 - size_t n_read = fread(reinterpret_cast(ptr), - sizeof(Cluster3x3), 1, fp); - if (n_read != 1) { - clusters.resize(nph_read); - return clusters; - // return nph_read; - } - good = 1; - if (noise_map) { - if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && - ptr->y < ny) { - tot1 = ptr->data[4]; - analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, - NULL, NULL, NULL); - // noise = noise_map[ptr->y * nx + ptr->x]; - noise = noise_map[ptr->y + ny * ptr->x]; - if (tot1 > noise || t2max > 2 * noise || - tot3 > 3 * noise) { - ; - } else - good = 0; - } else { - printf("Bad pixel number %d %d\n", ptr->x, ptr->y); - good = 0; - } - } - if (good) { - ptr++; - nph_read++; - } - (m_num_left)--; - if (nph_read >= n_clusters) - break; - } - } - if (nph_read >= n_clusters) - break; - } - } - // printf("%d\n",nph_read); - clusters.resize(nph_read); - return clusters; -} +// std::vector ClusterFile::read_cluster_with_cut(size_t n_clusters, +// double *noise_map, +// int nx, int ny) { +// if (m_mode != "r") { +// throw 
std::runtime_error("File not opened for reading"); +// } +// std::vector clusters(n_clusters); +// // size_t read_clusters_with_cut(FILE *fp, size_t n_clusters, Cluster *buf, +// // uint32_t *n_left, double *noise_map, int +// // nx, int ny) { +// int iframe = 0; +// // uint32_t nph = *n_left; +// uint32_t nph = m_num_left; +// // uint32_t nn = *n_left; +// uint32_t nn = m_num_left; +// size_t nph_read = 0; + +// int32_t t2max, tot1; +// int32_t tot3; +// // Cluster *ptr = buf; +// Cluster3x3 *ptr = clusters.data(); +// int good = 1; +// double noise; +// // read photons left from previous frame +// if (noise_map) +// printf("Using noise map\n"); + +// if (nph) { +// if (nph > n_clusters) { +// // if we have more photons left in the frame then photons to +// // read we read directly the requested number +// nn = n_clusters; +// } else { +// nn = nph; +// } +// for (size_t iph = 0; iph < nn; iph++) { +// // read photons 1 by 1 +// size_t n_read = +// fread(reinterpret_cast(ptr), sizeof(Cluster3x3), 1, fp); +// if (n_read != 1) { +// clusters.resize(nph_read); +// return clusters; +// } +// // TODO! 
error handling on read +// good = 1; +// if (noise_map) { +// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && ptr->y < ny) { +// tot1 = ptr->data[4]; +// analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, NULL, NULL, +// NULL); +// noise = noise_map[ptr->y * nx + ptr->x]; +// if (tot1 > noise || t2max > 2 * noise || tot3 > 3 * noise) { +// ; +// } else { +// good = 0; +// printf("%d %d %f %d %d %d\n", ptr->x, ptr->y, noise, +// tot1, t2max, tot3); +// } +// } else { +// printf("Bad pixel number %d %d\n", ptr->x, ptr->y); +// good = 0; +// } +// } +// if (good) { +// ptr++; +// nph_read++; +// } +// (m_num_left)--; +// if (nph_read >= n_clusters) +// break; +// } +// } +// if (nph_read < n_clusters) { +// // // keep on reading frames and photons until reaching +// // n_clusters +// while (fread(&iframe, sizeof(iframe), 1, fp)) { +// // // printf("%d\n",nph_read); + +// if (fread(&nph, sizeof(nph), 1, fp)) { +// // // printf("** %d\n",nph); +// m_num_left = nph; +// for (size_t iph = 0; iph < nph; iph++) { +// // // read photons 1 by 1 +// size_t n_read = fread(reinterpret_cast(ptr), +// sizeof(Cluster3x3), 1, fp); +// if (n_read != 1) { +// clusters.resize(nph_read); +// return clusters; +// // return nph_read; +// } +// good = 1; +// if (noise_map) { +// if (ptr->x >= 0 && ptr->x < nx && ptr->y >= 0 && +// ptr->y < ny) { +// tot1 = ptr->data[4]; +// analyze_cluster(*ptr, &t2max, &tot3, NULL, NULL, +// NULL, NULL, NULL); +// // noise = noise_map[ptr->y * nx + ptr->x]; +// noise = noise_map[ptr->y + ny * ptr->x]; +// if (tot1 > noise || t2max > 2 * noise || +// tot3 > 3 * noise) { +// ; +// } else +// good = 0; +// } else { +// printf("Bad pixel number %d %d\n", ptr->x, ptr->y); +// good = 0; +// } +// } +// if (good) { +// ptr++; +// nph_read++; +// } +// (m_num_left)--; +// if (nph_read >= n_clusters) +// break; +// } +// } +// if (nph_read >= n_clusters) +// break; +// } +// } +// // printf("%d\n",nph_read); +// clusters.resize(nph_read); +// return clusters; +// } 
NDArray calculate_eta2(ClusterVector &clusters) { - NDArray eta2({clusters.size(), 2}); + //TOTO! make work with 2x2 clusters + NDArray eta2({static_cast(clusters.size()), 2}); for (size_t i = 0; i < clusters.size(); i++) { - // int32_t t2; - // auto* ptr = reinterpret_cast (clusters.element_ptr(i) + 2 * - // sizeof(int16_t)); analyze_cluster(clusters.at(i), &t2, - // nullptr, nullptr, &eta2(i,0), &eta2(i,1) , nullptr, nullptr); - auto [x, y] = calculate_eta2(clusters.at(i)); - eta2(i, 0) = x; - eta2(i, 1) = y; + auto e = calculate_eta2(clusters.at(i)); + eta2(i, 0) = e.x; + eta2(i, 1) = e.y; } return eta2; } -std::array calculate_eta2(Cluster3x3 &cl) { - std::array eta2{}; +/** + * @brief Calculate the eta2 values for a 3x3 cluster and return them in a Eta2 struct + * containing etay, etax and the corner of the cluster. +*/ +Eta2 calculate_eta2(Cluster3x3 &cl) { + Eta2 eta{}; std::array tot2; tot2[0] = cl.data[0] + cl.data[1] + cl.data[3] + cl.data[4]; @@ -283,39 +294,43 @@ std::array calculate_eta2(Cluster3x3 &cl) { switch (c) { case cBottomLeft: if ((cl.data[3] + cl.data[4]) != 0) - eta2[0] = + eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); if ((cl.data[1] + cl.data[4]) != 0) - eta2[1] = + eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); + eta.c = cBottomLeft; break; case cBottomRight: if ((cl.data[2] + cl.data[5]) != 0) - eta2[0] = + eta.x = static_cast(cl.data[5]) / (cl.data[4] + cl.data[5]); if ((cl.data[1] + cl.data[4]) != 0) - eta2[1] = + eta.y = static_cast(cl.data[4]) / (cl.data[1] + cl.data[4]); + eta.c = cBottomRight; break; case cTopLeft: if ((cl.data[7] + cl.data[4]) != 0) - eta2[0] = + eta.x = static_cast(cl.data[4]) / (cl.data[3] + cl.data[4]); if ((cl.data[7] + cl.data[4]) != 0) - eta2[1] = + eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); + eta.c = cTopLeft; break; case cTopRight: if ((cl.data[5] + cl.data[4]) != 0) - eta2[0] = + eta.x = static_cast(cl.data[5]) / (cl.data[5] + cl.data[4]); if ((cl.data[7] + 
cl.data[4]) != 0) - eta2[1] = + eta.y = static_cast(cl.data[7]) / (cl.data[7] + cl.data[4]); + eta.c = cTopRight; break; - // default:; + // no default to allow compiler to warn about missing cases } - return eta2; + return eta; } int analyze_cluster(Cluster3x3 &cl, int32_t *t2, int32_t *t3, char *quad, diff --git a/src/ClusterVector.test.cpp b/src/ClusterVector.test.cpp index 24a482be..8ca3b1ef 100644 --- a/src/ClusterVector.test.cpp +++ b/src/ClusterVector.test.cpp @@ -6,12 +6,14 @@ using aare::ClusterVector; +struct Cluster_i2x2 { + int16_t x; + int16_t y; + int32_t data[4]; +}; + TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") { - struct Cluster_i2x2 { - int16_t x; - int16_t y; - int32_t data[4]; - }; + ClusterVector cv(2, 2, 4); REQUIRE(cv.capacity() == 4); @@ -19,7 +21,7 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") { REQUIRE(cv.cluster_size_x() == 2); REQUIRE(cv.cluster_size_y() == 2); // int16_t, int16_t, 2x2 int32_t = 20 bytes - REQUIRE(cv.element_offset() == 20); + REQUIRE(cv.item_size() == 20); //Create a cluster and push back into the vector Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; @@ -30,7 +32,7 @@ TEST_CASE("ClusterVector 2x2 int32_t capacity 4, push back then read") { //Read the cluster back out using copy. TODO! Can we improve the API? 
Cluster_i2x2 c2; std::byte *ptr = cv.element_ptr(0); - std::copy(ptr, ptr + cv.element_offset(), reinterpret_cast(&c2)); + std::copy(ptr, ptr + cv.item_size(), reinterpret_cast(&c2)); //Check that the data is the same REQUIRE(c1.x == c2.x); @@ -83,8 +85,8 @@ TEST_CASE("Storing floats"){ float data[8]; }; - ClusterVector cv(2, 4, 2); - REQUIRE(cv.capacity() == 2); + ClusterVector cv(2, 4, 10); + REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 0); REQUIRE(cv.cluster_size_x() == 2); REQUIRE(cv.cluster_size_y() == 4); @@ -92,17 +94,105 @@ TEST_CASE("Storing floats"){ //Create a cluster and push back into the vector Cluster_f4x2 c1 = {1, 2, {3.0, 4.0, 5.0, 6.0,3.0, 4.0, 5.0, 6.0}}; cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); - REQUIRE(cv.capacity() == 2); + REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 1); Cluster_f4x2 c2 = {6, 7, {8.0, 9.0, 10.0, 11.0,8.0, 9.0, 10.0, 11.0}}; cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); - REQUIRE(cv.capacity() == 2); + REQUIRE(cv.capacity() == 10); REQUIRE(cv.size() == 2); auto sums = cv.sum(); REQUIRE(sums.size() == 2); REQUIRE_THAT(sums[0], Catch::Matchers::WithinAbs(36.0, 1e-6)); REQUIRE_THAT(sums[1], Catch::Matchers::WithinAbs(76.0, 1e-6)); +} + +TEST_CASE("Push back more than initial capacity"){ + + ClusterVector cv(2, 2, 2); + auto initial_data = cv.data(); + Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; + cv.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + REQUIRE(cv.size() == 1); + REQUIRE(cv.capacity() == 2); + + Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}}; + cv.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + REQUIRE(cv.size() == 2); + REQUIRE(cv.capacity() == 2); + + Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}}; + cv.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + REQUIRE(cv.size() == 3); + REQUIRE(cv.capacity() == 4); + + Cluster_i2x2* ptr = reinterpret_cast(cv.data()); + REQUIRE(ptr[0].x == 1); + REQUIRE(ptr[0].y == 2); + REQUIRE(ptr[1].x == 6); + REQUIRE(ptr[1].y == 7); + 
REQUIRE(ptr[2].x == 11); + REQUIRE(ptr[2].y == 12); + + //We should have allocated a new buffer, since we outgrew the initial capacity + REQUIRE(initial_data != cv.data()); + +} + +TEST_CASE("Concatenate two cluster vectors where the first has enough capacity"){ + ClusterVector cv1(2, 2, 12); + Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; + cv1.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}}; + cv1.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + + ClusterVector cv2(2, 2, 2); + Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}}; + cv2.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + Cluster_i2x2 c4 = {16, 17, {18, 19, 20, 21}}; + cv2.push_back(c4.x, c4.y, reinterpret_cast(&c4.data[0])); + + cv1 += cv2; + REQUIRE(cv1.size() == 4); + REQUIRE(cv1.capacity() == 12); + + Cluster_i2x2* ptr = reinterpret_cast(cv1.data()); + REQUIRE(ptr[0].x == 1); + REQUIRE(ptr[0].y == 2); + REQUIRE(ptr[1].x == 6); + REQUIRE(ptr[1].y == 7); + REQUIRE(ptr[2].x == 11); + REQUIRE(ptr[2].y == 12); + REQUIRE(ptr[3].x == 16); + REQUIRE(ptr[3].y == 17); +} + +TEST_CASE("Concatenate two cluster vectors where we need to allocate"){ + ClusterVector cv1(2, 2, 2); + Cluster_i2x2 c1 = {1, 2, {3, 4, 5, 6}}; + cv1.push_back(c1.x, c1.y, reinterpret_cast(&c1.data[0])); + Cluster_i2x2 c2 = {6, 7, {8, 9, 10, 11}}; + cv1.push_back(c2.x, c2.y, reinterpret_cast(&c2.data[0])); + + ClusterVector cv2(2, 2, 2); + Cluster_i2x2 c3 = {11, 12, {13, 14, 15, 16}}; + cv2.push_back(c3.x, c3.y, reinterpret_cast(&c3.data[0])); + Cluster_i2x2 c4 = {16, 17, {18, 19, 20, 21}}; + cv2.push_back(c4.x, c4.y, reinterpret_cast(&c4.data[0])); + + cv1 += cv2; + REQUIRE(cv1.size() == 4); + REQUIRE(cv1.capacity() == 4); + + Cluster_i2x2* ptr = reinterpret_cast(cv1.data()); + REQUIRE(ptr[0].x == 1); + REQUIRE(ptr[0].y == 2); + REQUIRE(ptr[1].x == 6); + REQUIRE(ptr[1].y == 7); + REQUIRE(ptr[2].x == 11); + REQUIRE(ptr[2].y == 12); + REQUIRE(ptr[3].x == 16); + REQUIRE(ptr[3].y == 17); } \ 
No newline at end of file diff --git a/src/File.cpp b/src/File.cpp index 37e4c57d..11809671 100644 --- a/src/File.cpp +++ b/src/File.cpp @@ -45,6 +45,8 @@ File& File::operator=(File &&other) noexcept { return *this; } +// void File::close() { file_impl->close(); } + Frame File::read_frame() { return file_impl->read_frame(); } Frame File::read_frame(size_t frame_index) { return file_impl->read_frame(frame_index); diff --git a/src/Fit.cpp b/src/Fit.cpp new file mode 100644 index 00000000..08ecaecb --- /dev/null +++ b/src/Fit.cpp @@ -0,0 +1,300 @@ +#include "aare/Fit.hpp" +#include "aare/utils/task.hpp" + +#include +#include + +#include + +namespace aare { + +namespace func { + +double gaus(const double x, const double *par) { + return par[0] * exp(-pow(x - par[1], 2) / (2 * pow(par[2], 2))); +} + +NDArray gaus(NDView x, NDView par) { + NDArray y({x.shape(0)}, 0); + for (size_t i = 0; i < x.size(); i++) { + y(i) = gaus(x(i), par.data()); + } + return y; +} + +double pol1(const double x, const double *par) { return par[0] * x + par[1]; } + +NDArray pol1(NDView x, NDView par) { + NDArray y({x.shape()}, 0); + for (size_t i = 0; i < x.size(); i++) { + y(i) = pol1(x(i), par.data()); + } + return y; +} + +} // namespace func + +NDArray fit_gaus(NDView x, NDView y) { + NDArray result({3}, 0); + lm_control_struct control = lm_control_double; + + // Estimate the initial parameters for the fit + std::vector start_par{0, 0, 0}; + auto e = std::max_element(y.begin(), y.end()); + auto idx = std::distance(y.begin(), e); + + start_par[0] = *e; // For amplitude we use the maximum value + start_par[1] = + x[idx]; // For the mean we use the x value of the maximum value + + // For sigma we estimate the fwhm and divide by 2.35 + // assuming equally spaced x values + auto delta = x[1] - x[0]; + start_par[2] = + std::count_if(y.begin(), y.end(), + [e, delta](double val) { return val > *e / 2; }) * + delta / 2.35; + + lmfit::result_t res(start_par); + lmcurve(res.par.size(), res.par.data(), 
x.size(), x.data(), y.data(), + aare::func::gaus, &control, &res.status); + + result(0) = res.par[0]; + result(1) = res.par[1]; + result(2) = res.par[2]; + + return result; +} + +NDArray fit_gaus(NDView x, NDView y, + int n_threads) { + NDArray result({y.shape(0), y.shape(1), 3}, 0); + + auto process = [&x, &y, &result](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView values(&y(row, col, 0), {y.shape(2)}); + auto res = fit_gaus(x, values); + result(row, col, 0) = res(0); + result(row, col, 1) = res(1); + result(row, col, 2) = res(2); + } + } + }; + auto tasks = split_task(0, y.shape(0), n_threads); + std::vector threads; + for (auto &task : tasks) { + threads.push_back(std::thread(process, task.first, task.second)); + } + for (auto &thread : threads) { + thread.join(); + } + + return result; +} + +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, + int n_threads) { + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView y_view(&y(row, col, 0), {y.shape(2)}); + NDView y_err_view(&y_err(row, col, 0), + {y_err.shape(2)}); + NDView par_out_view(&par_out(row, col, 0), + {par_out.shape(2)}); + NDView par_err_out_view(&par_err_out(row, col, 0), + {par_err_out.shape(2)}); + fit_gaus(x, y_view, y_err_view, par_out_view, par_err_out_view); + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + std::vector threads; + for (auto &task : tasks) { + threads.push_back(std::thread(process, task.first, task.second)); + } + for (auto &thread : threads) { + thread.join(); + } +} + +void fit_gaus(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out) { + // Check that we have the correct sizes + if (y.size() != x.size() || y.size() != y_err.size() || + par_out.size() != 3 || par_err_out.size() 
!= 3) { + throw std::runtime_error("Data, x, data_err must have the same size " + "and par_out, par_err_out must have size 3"); + } + + lm_control_struct control = lm_control_double; + + // Estimate the initial parameters for the fit + std::vector start_par{0, 0, 0}; + std::vector start_par_err{0, 0, 0}; + std::vector start_cov{0, 0, 0, 0, 0, 0, 0, 0, 0}; + + auto e = std::max_element(y.begin(), y.end()); + auto idx = std::distance(y.begin(), e); + start_par[0] = *e; // For amplitude we use the maximum value + start_par[1] = + x[idx]; // For the mean we use the x value of the maximum value + + // For sigma we estimate the fwhm and divide by 2.35 + // assuming equally spaced x values + auto delta = x[1] - x[0]; + start_par[2] = + std::count_if(y.begin(), y.end(), + [e, delta](double val) { return val > *e / 2; }) * + delta / 2.35; + + lmfit::result_t res(start_par); + lmfit::result_t res_err(start_par_err); + lmfit::result_t cov(start_cov); + + // TODO can we make lmcurve write the result directly where is should be? 
+ lmcurve2(res.par.size(), res.par.data(), res_err.par.data(), cov.par.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::gaus, + &control, &res.status); + + par_out(0) = res.par[0]; + par_out(1) = res.par[1]; + par_out(2) = res.par[2]; + par_err_out(0) = res_err.par[0]; + par_err_out(1) = res_err.par[1]; + par_err_out(2) = res_err.par[2]; +} + +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out) { + // Check that we have the correct sizes + if (y.size() != x.size() || y.size() != y_err.size() || + par_out.size() != 2 || par_err_out.size() != 2) { + throw std::runtime_error("Data, x, data_err must have the same size " + "and par_out, par_err_out must have size 2"); + } + + lm_control_struct control = lm_control_double; + + // Estimate the initial parameters for the fit + std::vector start_par{0, 0}; + std::vector start_par_err{0, 0}; + std::vector start_cov{0, 0, 0, 0}; + + auto y2 = std::max_element(y.begin(), y.end()); + auto x2 = x[std::distance(y.begin(), y2)]; + auto y1 = std::min_element(y.begin(), y.end()); + auto x1 = x[std::distance(y.begin(), y1)]; + + start_par[0] = + (*y2 - *y1) / (x2 - x1); // For amplitude we use the maximum value + start_par[1] = + *y1 - ((*y2 - *y1) / (x2 - x1)) * + x1; // For the mean we use the x value of the maximum value + + lmfit::result_t res(start_par); + lmfit::result_t res_err(start_par_err); + lmfit::result_t cov(start_cov); + + lmcurve2(res.par.size(), res.par.data(), res_err.par.data(), cov.par.data(), + x.size(), x.data(), y.data(), y_err.data(), aare::func::pol1, + &control, &res.status); + + par_out(0) = res.par[0]; + par_out(1) = res.par[1]; + par_err_out(0) = res_err.par[0]; + par_err_out(1) = res_err.par[1]; +} + +void fit_pol1(NDView x, NDView y, NDView y_err, + NDView par_out, NDView par_err_out, + int n_threads) { + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < 
y.shape(1); col++) { + NDView y_view(&y(row, col, 0), {y.shape(2)}); + NDView y_err_view(&y_err(row, col, 0), + {y_err.shape(2)}); + NDView par_out_view(&par_out(row, col, 0), + {par_out.shape(2)}); + NDView par_err_out_view(&par_err_out(row, col, 0), + {par_err_out.shape(2)}); + fit_pol1(x, y_view, y_err_view, par_out_view, par_err_out_view); + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + std::vector threads; + for (auto &task : tasks) { + threads.push_back(std::thread(process, task.first, task.second)); + } + for (auto &thread : threads) { + thread.join(); + } +} + +NDArray fit_pol1(NDView x, NDView y) { + // // Check that we have the correct sizes + // if (y.size() != x.size() || y.size() != y_err.size() || + // par_out.size() != 2 || par_err_out.size() != 2) { + // throw std::runtime_error("Data, x, data_err must have the same size " + // "and par_out, par_err_out must have size 2"); + // } + NDArray par({2}, 0); + + lm_control_struct control = lm_control_double; + + // Estimate the initial parameters for the fit + std::vector start_par{0, 0}; + + auto y2 = std::max_element(y.begin(), y.end()); + auto x2 = x[std::distance(y.begin(), y2)]; + auto y1 = std::min_element(y.begin(), y.end()); + auto x1 = x[std::distance(y.begin(), y1)]; + + start_par[0] = (*y2 - *y1) / (x2 - x1); + start_par[1] = *y1 - ((*y2 - *y1) / (x2 - x1)) * x1; + + lmfit::result_t res(start_par); + + lmcurve(res.par.size(), res.par.data(), x.size(), x.data(), y.data(), + aare::func::pol1, &control, &res.status); + + par(0) = res.par[0]; + par(1) = res.par[1]; + return par; +} + +NDArray fit_pol1(NDView x, NDView y, + int n_threads) { + NDArray result({y.shape(0), y.shape(1), 2}, 0); + + auto process = [&](ssize_t first_row, ssize_t last_row) { + for (ssize_t row = first_row; row < last_row; row++) { + for (ssize_t col = 0; col < y.shape(1); col++) { + NDView values(&y(row, col, 0), {y.shape(2)}); + auto res = fit_pol1(x, values); + result(row, col, 0) = res(0); + 
result(row, col, 1) = res(1); + } + } + }; + + auto tasks = split_task(0, y.shape(0), n_threads); + std::vector threads; + for (auto &task : tasks) { + threads.push_back(std::thread(process, task.first, task.second)); + } + for (auto &thread : threads) { + thread.join(); + } + return result; +} + +} // namespace aare \ No newline at end of file diff --git a/src/RawFile.cpp b/src/RawFile.cpp index 744064f9..e704add6 100644 --- a/src/RawFile.cpp +++ b/src/RawFile.cpp @@ -1,6 +1,7 @@ #include "aare/RawFile.hpp" #include "aare/PixelMap.hpp" #include "aare/defs.hpp" +#include "aare/geo_helpers.hpp" #include #include @@ -21,8 +22,11 @@ RawFile::RawFile(const std::filesystem::path &fname, const std::string &mode) find_geometry(); - update_geometry_with_roi(); + if (m_master.roi()){ + m_geometry = update_geometry_with_roi(m_geometry, m_master.roi().value()); + } + open_subfiles(); } else { throw std::runtime_error(LOCATION + @@ -72,9 +76,13 @@ size_t RawFile::n_mod() const { return n_subfile_parts; } size_t RawFile::bytes_per_frame() { - return m_rows * m_cols * m_master.bitdepth() / 8; + // return m_rows * m_cols * m_master.bitdepth() / 8; + return m_geometry.pixels_x * m_geometry.pixels_y * m_master.bitdepth() / 8; +} +size_t RawFile::pixels_per_frame() { + // return m_rows * m_cols; + return m_geometry.pixels_x * m_geometry.pixels_y; } -size_t RawFile::pixels_per_frame() { return m_rows * m_cols; } DetectorType RawFile::detector_type() const { return m_master.detector_type(); } @@ -92,8 +100,8 @@ void RawFile::seek(size_t frame_index) { size_t RawFile::tell() { return m_current_frame; }; size_t RawFile::total_frames() const { return m_master.frames_in_file(); } -size_t RawFile::rows() const { return m_rows; } -size_t RawFile::cols() const { return m_cols; } +size_t RawFile::rows() const { return m_geometry.pixels_y; } +size_t RawFile::cols() const { return m_geometry.pixels_x; } size_t RawFile::bitdepth() const { return m_master.bitdepth(); } xy RawFile::geometry() { 
return m_master.geometry(); } @@ -102,11 +110,11 @@ void RawFile::open_subfiles() { for (size_t i = 0; i != n_subfiles; ++i) { auto v = std::vector(n_subfile_parts); for (size_t j = 0; j != n_subfile_parts; ++j) { - auto pos = m_module_pixel_0[j]; + auto pos = m_geometry.module_pixel_0[j]; v[j] = new RawSubFile(m_master.data_fname(j, i), m_master.detector_type(), pos.height, pos.width, m_master.bitdepth(), - positions[j].row, positions[j].col); + pos.row_index, pos.col_index); } subfiles.push_back(v); @@ -149,112 +157,49 @@ int RawFile::find_number_of_subfiles() { RawMasterFile RawFile::master() const { return m_master; } +/** + * @brief Find the geometry of the detector by opening all the subfiles and + * reading the headers. + */ void RawFile::find_geometry() { + + //Hold the maximal row and column number found + //Later used for calculating the total number of rows and columns uint16_t r{}; uint16_t c{}; for (size_t i = 0; i < n_subfile_parts; i++) { - auto h = this->read_header(m_master.data_fname(i, 0)); + auto h = read_header(m_master.data_fname(i, 0)); r = std::max(r, h.row); c = std::max(c, h.column); - positions.push_back({h.row, h.column}); + // positions.push_back({h.row, h.column}); + ModuleGeometry g; - g.x = h.column * m_master.pixels_x(); - g.y = h.row * m_master.pixels_y(); + g.origin_x = h.column * m_master.pixels_x(); + g.origin_y = h.row * m_master.pixels_y(); + g.row_index = h.row; + g.col_index = h.column; g.width = m_master.pixels_x(); g.height = m_master.pixels_y(); - m_module_pixel_0.push_back(g); + m_geometry.module_pixel_0.push_back(g); } r++; c++; - m_rows = (r * m_master.pixels_y()); - m_cols = (c * m_master.pixels_x()); - - m_rows += static_cast((r - 1) * cfg.module_gap_row); + m_geometry.pixels_y = (r * m_master.pixels_y()); + m_geometry.pixels_x = (c * m_master.pixels_x()); + m_geometry.modules_x = c; + m_geometry.modules_y = r; + m_geometry.pixels_y += static_cast((r - 1) * cfg.module_gap_row); -#ifdef AARE_VERBOSE - 
fmt::print("\nRawFile::find_geometry()\n"); - for (size_t i = 0; i < m_module_pixel_0.size(); i++) { - fmt::print("Module {} at position: (r:{},c:{})\n", i, - m_module_pixel_0[i].y, m_module_pixel_0[i].x); - } - fmt::print("Image size: {}x{}\n\n", m_rows, m_cols); -#endif } -void RawFile::update_geometry_with_roi() { - // TODO! implement this - if (m_master.roi()) { - auto roi = m_master.roi().value(); - - // TODO! can we do this cleaner? - int pos_y = 0; - int pos_y_increment = 0; - for (size_t row = 0; row < m_master.geometry().row; row++) { - int pos_x = 0; - for (size_t col = 0; col < m_master.geometry().col; col++) { - auto &m = m_module_pixel_0[row * m_master.geometry().col + col]; - auto original_height = m.height; - auto original_width = m.width; - - // module is to the left of the roi - if (m.x + m.width < roi.xmin) { - m.width = 0; - - // roi is in module - } else { - // here we only arrive when the roi is in or to the left of - // the module - if (roi.xmin > m.x) { - m.width -= roi.xmin - m.x; - } - if (roi.xmax < m.x + m.width) { - m.width -= m.x + original_width - roi.xmax; - } - m.x = pos_x; - pos_x += m.width; - } - - if (m.y + m.height < roi.ymin) { - m.height = 0; - } else { - if ((roi.ymin > m.y) && (roi.ymin < m.y + m.height)) { - m.height -= roi.ymin - m.y; - - } - if (roi.ymax < m.y + m.height) { - m.height -= m.y + original_height - roi.ymax; - } - m.y = pos_y; - pos_y_increment = m.height; - } - } - // increment pos_y - pos_y += pos_y_increment; - } - - m_rows = roi.height(); - m_cols = roi.width(); - } - -#ifdef AARE_VERBOSE - fmt::print("RawFile::update_geometry_with_roi()\n"); - for (const auto &m : m_module_pixel_0) { - fmt::print("Module at position: (r:{}, c:{}, h:{}, w:{})\n", m.y, m.x, - m.height, m.width); - } - fmt::print("Updated image size: {}x{}\n\n", m_rows, m_cols); - fmt::print("\n"); -#endif - -} Frame RawFile::get_frame(size_t frame_index) { - auto f = Frame(m_rows, m_cols, Dtype::from_bitdepth(m_master.bitdepth())); + auto 
f = Frame(m_geometry.pixels_y, m_geometry.pixels_x, Dtype::from_bitdepth(m_master.bitdepth())); std::byte *frame_buffer = f.data(); get_frame_into(frame_index, frame_buffer); return f; @@ -278,6 +223,10 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect if (n_subfile_parts != 1) { for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { auto subfile_id = frame_index / m_master.max_frames_per_file(); + if (subfile_id >= subfiles.size()) { + throw std::runtime_error(LOCATION + + " Subfile out of range. Possible missing data."); + } frame_numbers[part_idx] = subfiles[subfile_id][part_idx]->frame_number( frame_index % m_master.max_frames_per_file()); @@ -311,12 +260,16 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { auto corrected_idx = frame_indices[part_idx]; auto subfile_id = corrected_idx / m_master.max_frames_per_file(); + if (subfile_id >= subfiles.size()) { + throw std::runtime_error(LOCATION + + " Subfile out of range. Possible missing data."); + } // This is where we start writing - auto offset = (m_module_pixel_0[part_idx].y * m_cols + - m_module_pixel_0[part_idx].x)*m_master.bitdepth()/8; + auto offset = (m_geometry.module_pixel_0[part_idx].origin_y * m_geometry.pixels_x + + m_geometry.module_pixel_0[part_idx].origin_x)*m_master.bitdepth()/8; - if (m_module_pixel_0[part_idx].x!=0) + if (m_geometry.module_pixel_0[part_idx].origin_x!=0) throw std::runtime_error(LOCATION + "Implementation error. x pos not 0."); //TODO! 
Risk for out of range access @@ -340,9 +293,13 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect // level for (size_t part_idx = 0; part_idx != n_subfile_parts; ++part_idx) { - auto pos = m_module_pixel_0[part_idx]; + auto pos = m_geometry.module_pixel_0[part_idx]; auto corrected_idx = frame_indices[part_idx]; auto subfile_id = corrected_idx / m_master.max_frames_per_file(); + if (subfile_id >= subfiles.size()) { + throw std::runtime_error(LOCATION + + " Subfile out of range. Possible missing data."); + } subfiles[subfile_id][part_idx]->seek(corrected_idx % m_master.max_frames_per_file()); subfiles[subfile_id][part_idx]->read_into(part_buffer, header); @@ -352,9 +309,9 @@ void RawFile::get_frame_into(size_t frame_index, std::byte *frame_buffer, Detect for (size_t cur_row = 0; cur_row < static_cast(pos.height); cur_row++) { - auto irow = (pos.y + cur_row); - auto icol = pos.x; - auto dest = (irow * this->m_cols + icol); + auto irow = (pos.origin_y + cur_row); + auto icol = pos.origin_x; + auto dest = (irow * this->m_geometry.pixels_x + icol); dest = dest * m_master.bitdepth() / 8; memcpy(frame_buffer + dest, part_buffer + cur_row * pos.width * @@ -400,4 +357,8 @@ RawFile::~RawFile() { } } + + + + } // namespace aare \ No newline at end of file diff --git a/src/RawFile.test.cpp b/src/RawFile.test.cpp index faefd283..5f9b2e18 100644 --- a/src/RawFile.test.cpp +++ b/src/RawFile.test.cpp @@ -1,10 +1,13 @@ #include "aare/File.hpp" +#include "aare/RawMasterFile.hpp" //needed for ROI +#include "aare/RawFile.hpp" #include #include #include "test_config.hpp" + using aare::File; TEST_CASE("Read number of frames from a jungfrau raw file", "[.integration]") { @@ -148,3 +151,5 @@ TEST_CASE("Read file with unordered frames", "[.integration]") { File f(fpath); REQUIRE_THROWS((f.read_frame())); } + + diff --git a/src/RawSubFile.cpp b/src/RawSubFile.cpp index 6fae7ced..a3bb79c8 100644 --- a/src/RawSubFile.cpp +++ b/src/RawSubFile.cpp @@ -9,11 +9,13 
@@ namespace aare { RawSubFile::RawSubFile(const std::filesystem::path &fname, DetectorType detector, size_t rows, size_t cols, size_t bitdepth, uint32_t pos_row, uint32_t pos_col) - : m_detector_type(detector), m_bitdepth(bitdepth), m_fname(fname), m_rows(rows), m_cols(cols), - m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), m_pos_row(pos_row), m_pos_col(pos_col) { + : m_detector_type(detector), m_bitdepth(bitdepth), m_fname(fname), + m_rows(rows), m_cols(cols), + m_bytes_per_frame((m_bitdepth / 8) * m_rows * m_cols), m_pos_row(pos_row), + m_pos_col(pos_col) { if (m_detector_type == DetectorType::Moench03_old) { m_pixel_map = GenerateMoench03PixelMap(); - }else if(m_detector_type == DetectorType::Eiger && m_pos_row % 2 == 0){ + } else if (m_detector_type == DetectorType::Eiger && m_pos_row % 2 == 0) { m_pixel_map = GenerateEigerFlipRowsPixelMap(); } @@ -42,7 +44,7 @@ RawSubFile::RawSubFile(const std::filesystem::path &fname, void RawSubFile::seek(size_t frame_index) { if (frame_index >= n_frames) { - throw std::runtime_error("Frame number out of range"); + throw std::runtime_error(LOCATION + fmt::format("Frame index {} out of range in a file with {} frames", frame_index, n_frames)); } m_file.seekg((sizeof(DetectorHeader) + bytes_per_frame()) * frame_index); } @@ -51,37 +53,48 @@ size_t RawSubFile::tell() { return m_file.tellg() / (sizeof(DetectorHeader) + bytes_per_frame()); } - void RawSubFile::read_into(std::byte *image_buf, DetectorHeader *header) { - if(header){ - m_file.read(reinterpret_cast(header), sizeof(DetectorHeader)); + if (header) { + m_file.read(reinterpret_cast(header), sizeof(DetectorHeader)); } else { m_file.seekg(sizeof(DetectorHeader), std::ios::cur); } - //TODO! expand support for different bitdepths - if(m_pixel_map){ + // TODO! expand support for different bitdepths + if (m_pixel_map) { // read into a temporary buffer and then copy the data to the buffer // in the correct order - // currently this only supports 16 bit data! 
- auto part_buffer = new std::byte[bytes_per_frame()]; - m_file.read(reinterpret_cast(part_buffer), bytes_per_frame()); - auto *data = reinterpret_cast(image_buf); - auto *part_data = reinterpret_cast(part_buffer); - for (size_t i = 0; i < pixels_per_frame(); i++) { - data[i] = part_data[(*m_pixel_map)(i)]; + // TODO! add 4 bit support + if(m_bitdepth == 8){ + read_with_map(image_buf); + }else if (m_bitdepth == 16) { + read_with_map(image_buf); + } else if (m_bitdepth == 32) { + read_with_map(image_buf); + }else{ + throw std::runtime_error("Unsupported bitdepth for read with pixel map"); } - delete[] part_buffer; + } else { // read directly into the buffer m_file.read(reinterpret_cast(image_buf), bytes_per_frame()); } } +template +void RawSubFile::read_with_map(std::byte *image_buf) { + auto part_buffer = new std::byte[bytes_per_frame()]; + m_file.read(reinterpret_cast(part_buffer), bytes_per_frame()); + auto *data = reinterpret_cast(image_buf); + auto *part_data = reinterpret_cast(part_buffer); + for (size_t i = 0; i < pixels_per_frame(); i++) { + data[i] = part_data[(*m_pixel_map)(i)]; + } + delete[] part_buffer; +} size_t RawSubFile::rows() const { return m_rows; } size_t RawSubFile::cols() const { return m_cols; } - void RawSubFile::get_part(std::byte *buffer, size_t frame_index) { seek(frame_index); read_into(buffer, nullptr); @@ -94,5 +107,4 @@ size_t RawSubFile::frame_number(size_t frame_index) { return h.frameNumber; } - } // namespace aare \ No newline at end of file diff --git a/src/decode.cpp b/src/decode.cpp new file mode 100644 index 00000000..17c033da --- /dev/null +++ b/src/decode.cpp @@ -0,0 +1,61 @@ +#include "aare/decode.hpp" + +namespace aare { + +uint16_t adc_sar_05_decode64to16(uint64_t input){ + + //we want bits 29,19,28,18,31,21,27,20,24,23,25,22 and then pad to 16 + uint16_t output = 0; + output |= ((input >> 22) & 1) << 11; + output |= ((input >> 25) & 1) << 10; + output |= ((input >> 23) & 1) << 9; + output |= ((input >> 24) & 1) << 8; + 
output |= ((input >> 20) & 1) << 7; + output |= ((input >> 27) & 1) << 6; + output |= ((input >> 21) & 1) << 5; + output |= ((input >> 31) & 1) << 4; + output |= ((input >> 18) & 1) << 3; + output |= ((input >> 28) & 1) << 2; + output |= ((input >> 19) & 1) << 1; + output |= ((input >> 29) & 1) << 0; + return output; +} + +void adc_sar_05_decode64to16(NDView input, NDView output){ + for(int64_t i = 0; i < input.shape(0); i++){ + for(int64_t j = 0; j < input.shape(1); j++){ + output(i,j) = adc_sar_05_decode64to16(input(i,j)); + } + } +} + +uint16_t adc_sar_04_decode64to16(uint64_t input){ + + // bit_map = array([15,17,19,21,23,4,6,8,10,12,14,16] LSB->MSB + uint16_t output = 0; + output |= ((input >> 16) & 1) << 11; + output |= ((input >> 14) & 1) << 10; + output |= ((input >> 12) & 1) << 9; + output |= ((input >> 10) & 1) << 8; + output |= ((input >> 8) & 1) << 7; + output |= ((input >> 6) & 1) << 6; + output |= ((input >> 4) & 1) << 5; + output |= ((input >> 23) & 1) << 4; + output |= ((input >> 21) & 1) << 3; + output |= ((input >> 19) & 1) << 2; + output |= ((input >> 17) & 1) << 1; + output |= ((input >> 15) & 1) << 0; + return output; +} + +void adc_sar_04_decode64to16(NDView input, NDView output){ + for(int64_t i = 0; i < input.shape(0); i++){ + for(int64_t j = 0; j < input.shape(1); j++){ + output(i,j) = adc_sar_04_decode64to16(input(i,j)); + } + } +} + + + +} // namespace aare diff --git a/src/geo_helpers.cpp b/src/geo_helpers.cpp new file mode 100644 index 00000000..39086ec1 --- /dev/null +++ b/src/geo_helpers.cpp @@ -0,0 +1,71 @@ + +#include "aare/geo_helpers.hpp" +#include "fmt/core.h" + +namespace aare{ + +DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) { + #ifdef AARE_VERBOSE + fmt::println("update_geometry_with_roi() called with ROI: {} {} {} {}", + roi.xmin, roi.xmax, roi.ymin, roi.ymax); + fmt::println("Geometry: {} {} {} {} {} {}", + geo.modules_x, geo.modules_y, geo.pixels_x, geo.pixels_y, geo.module_gap_row, 
geo.module_gap_col); + #endif + int pos_y = 0; + int pos_y_increment = 0; + for (int row = 0; row < geo.modules_y; row++) { + int pos_x = 0; + for (int col = 0; col < geo.modules_x; col++) { + auto &m = geo.module_pixel_0[row * geo.modules_x + col]; + auto original_height = m.height; + auto original_width = m.width; + + // module is to the left of the roi + if (m.origin_x + m.width < roi.xmin) { + m.width = 0; + + // roi is in module + } else { + // here we only arrive when the roi is in or to the left of + // the module + if (roi.xmin > m.origin_x) { + m.width -= roi.xmin - m.origin_x; + } + if (roi.xmax < m.origin_x + original_width) { + m.width -= m.origin_x + original_width - roi.xmax; + } + m.origin_x = pos_x; + pos_x += m.width; + } + + if (m.origin_y + m.height < roi.ymin) { + m.height = 0; + } else { + if ((roi.ymin > m.origin_y) && (roi.ymin < m.origin_y + m.height)) { + m.height -= roi.ymin - m.origin_y; + + } + if (roi.ymax < m.origin_y + original_height) { + m.height -= m.origin_y + original_height - roi.ymax; + } + m.origin_y = pos_y; + pos_y_increment = m.height; + } + #ifdef AARE_VERBOSE + fmt::println("Module {} {} {} {}", m.origin_x, m.origin_y, m.width, m.height); + #endif + } + // increment pos_y + pos_y += pos_y_increment; + } + + // m_rows = roi.height(); + // m_cols = roi.width(); + geo.pixels_x = roi.width(); + geo.pixels_y = roi.height(); + + return geo; + +} + +} // namespace aare \ No newline at end of file diff --git a/src/geo_helpers.test.cpp b/src/geo_helpers.test.cpp new file mode 100644 index 00000000..08ee96ca --- /dev/null +++ b/src/geo_helpers.test.cpp @@ -0,0 +1,230 @@ +#include "aare/File.hpp" +#include "aare/RawMasterFile.hpp" //needed for ROI +#include "aare/RawFile.hpp" + +#include +#include + +#include "aare/geo_helpers.hpp" +#include "test_config.hpp" + +TEST_CASE("Simple ROIs on one module"){ + // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) + aare::DetectorGeometry geo; + + + 
aare::ModuleGeometry mod; + mod.origin_x = 0; + mod.origin_y = 0; + mod.width = 1024; + mod.height = 512; + + + + geo.pixels_x = 1024; + geo.pixels_y = 512; + geo.modules_x = 1; + geo.modules_y = 1; + geo.module_pixel_0.push_back(mod); + + SECTION("ROI is the whole module"){ + aare::ROI roi; + roi.xmin = 0; + roi.xmax = 1024; + roi.ymin = 0; + roi.ymax = 512; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 1024); + REQUIRE(updated_geo.pixels_y == 512); + REQUIRE(updated_geo.modules_x == 1); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 512); + REQUIRE(updated_geo.module_pixel_0[0].width == 1024); + } + SECTION("ROI is the top left corner of the module"){ + aare::ROI roi; + roi.xmin = 100; + roi.xmax = 200; + roi.ymin = 150; + roi.ymax = 200; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 100); + REQUIRE(updated_geo.pixels_y == 50); + REQUIRE(updated_geo.modules_x == 1); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 50); + REQUIRE(updated_geo.module_pixel_0[0].width == 100); + } + + SECTION("ROI is a small square"){ + aare::ROI roi; + roi.xmin = 1000; + roi.xmax = 1010; + roi.ymin = 500; + roi.ymax = 510; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 10); + REQUIRE(updated_geo.pixels_y == 10); + REQUIRE(updated_geo.modules_x == 1); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 10); + REQUIRE(updated_geo.module_pixel_0[0].width == 10); + } + SECTION("ROI is a few columns"){ + aare::ROI roi; + roi.xmin = 750; + roi.xmax = 800; + roi.ymin = 0; + roi.ymax = 512; + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 50); + REQUIRE(updated_geo.pixels_y == 512); + REQUIRE(updated_geo.modules_x == 1); + REQUIRE(updated_geo.modules_y == 1); + 
TEST_CASE("Two modules side by side") {
    // Two 1024 x 512 modules in one row: origins at x = 0 and x = 1024.
    aare::DetectorGeometry geo;

    aare::ModuleGeometry mod;
    mod.origin_x = 0;
    mod.origin_y = 0;
    mod.width = 1024;
    mod.height = 512;

    geo.pixels_x = 2048;
    geo.pixels_y = 512;
    geo.modules_x = 2;
    geo.modules_y = 1;

    geo.module_pixel_0.push_back(mod);
    mod.origin_x = 1024;
    geo.module_pixel_0.push_back(mod);

    SECTION("ROI is the whole image") {
        aare::ROI roi;
        roi.xmin = 0;
        roi.xmax = 2048;
        roi.ymin = 0;
        roi.ymax = 512;
        auto updated_geo = aare::update_geometry_with_roi(geo, roi);

        REQUIRE(updated_geo.pixels_x == 2048);
        REQUIRE(updated_geo.pixels_y == 512);
        REQUIRE(updated_geo.modules_x == 2);
        REQUIRE(updated_geo.modules_y == 1);
    }
    SECTION("rectangle on both modules") {
        // ROI straddles the module boundary: 224 px on the left module,
        // 276 px on the right one.
        aare::ROI roi;
        roi.xmin = 800;
        roi.xmax = 1300;
        roi.ymin = 200;
        roi.ymax = 499;
        auto updated_geo = aare::update_geometry_with_roi(geo, roi);

        REQUIRE(updated_geo.pixels_x == 500);
        REQUIRE(updated_geo.pixels_y == 299);
        REQUIRE(updated_geo.modules_x == 2);
        REQUIRE(updated_geo.modules_y == 1);
        REQUIRE(updated_geo.module_pixel_0[0].height == 299);
        REQUIRE(updated_geo.module_pixel_0[0].width == 224);
        REQUIRE(updated_geo.module_pixel_0[1].height == 299);
        REQUIRE(updated_geo.module_pixel_0[1].width == 276);
    }
}
mod.origin_x = 1024; + geo.module_pixel_0.push_back(mod); + mod.origin_x = 2048; + geo.module_pixel_0.push_back(mod); + + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 1800); + REQUIRE(updated_geo.pixels_y == 123); + REQUIRE(updated_geo.modules_x == 3); + REQUIRE(updated_geo.modules_y == 1); + REQUIRE(updated_geo.module_pixel_0[0].height == 123); + REQUIRE(updated_geo.module_pixel_0[0].width == 324); + REQUIRE(updated_geo.module_pixel_0[1].height == 123); + REQUIRE(updated_geo.module_pixel_0[1].width == 1024); + REQUIRE(updated_geo.module_pixel_0[2].height == 123); + REQUIRE(updated_geo.module_pixel_0[2].width == 452); +} + +TEST_CASE("Four modules as a square"){ + // DetectorGeometry update_geometry_with_roi(DetectorGeometry geo, aare::ROI roi) + aare::DetectorGeometry geo; + aare::ROI roi; + roi.xmin = 500; + roi.xmax = 2000; + roi.ymin = 500; + roi.ymax = 600; + + aare::ModuleGeometry mod; + mod.origin_x = 0; + mod.origin_y = 0; + mod.width = 1024; + mod.height = 512; + + geo.pixels_x = 2048; + geo.pixels_y = 1024; + geo.modules_x = 2; + geo.modules_y = 2; + + geo.module_pixel_0.push_back(mod); + mod.origin_x = 1024; + geo.module_pixel_0.push_back(mod); + mod.origin_x = 0; + mod.origin_y = 512; + geo.module_pixel_0.push_back(mod); + mod.origin_x = 1024; + geo.module_pixel_0.push_back(mod); + + auto updated_geo = aare::update_geometry_with_roi(geo, roi); + + REQUIRE(updated_geo.pixels_x == 1500); + REQUIRE(updated_geo.pixels_y == 100); + REQUIRE(updated_geo.modules_x == 2); + REQUIRE(updated_geo.modules_y == 2); + REQUIRE(updated_geo.module_pixel_0[0].height == 12); + REQUIRE(updated_geo.module_pixel_0[0].width == 524); + REQUIRE(updated_geo.module_pixel_0[1].height == 12); + REQUIRE(updated_geo.module_pixel_0[1].width == 976); + REQUIRE(updated_geo.module_pixel_0[2].height == 88); + REQUIRE(updated_geo.module_pixel_0[2].width == 524); + REQUIRE(updated_geo.module_pixel_0[3].height == 88); + 
namespace aare {

/// Split the half-open range [first, last) into at most n_threads contiguous
/// [start, stop) sub-ranges for parallel processing.
///
/// If there are fewer frames than threads, one single-frame task is produced
/// per frame. Otherwise the range is divided into n_threads equal steps, with
/// the final task absorbing any remainder.
///
/// @param first     first index (inclusive)
/// @param last      one-past-last index (exclusive)
/// @param n_threads requested number of tasks
/// @return vector of {start, stop} pairs; empty if the range is empty or
///         n_threads is not positive
std::vector<std::pair<int, int>> split_task(int first, int last,
                                            int n_threads) {
    std::vector<std::pair<int, int>> vec;

    int n_frames = last - first;
    // BUGFIX: guard degenerate input; previously last < first made the
    // single-frame loop below (i != n_frames) spin forever.
    if (n_frames <= 0 || n_threads <= 0)
        return vec;

    vec.reserve(n_threads);

    if (n_threads >= n_frames) {
        // One frame per task.
        // BUGFIX: offset by `first`; ranges used to start at 0 regardless.
        for (int i = 0; i != n_frames; ++i) {
            vec.push_back({first + i, first + i + 1});
        }
        return vec;
    }

    int step = n_frames / n_threads;
    for (int i = 0; i != n_threads; ++i) {
        // BUGFIX: offset by `first`; previously only the final `stop = last`
        // respected it, so any call with first != 0 returned wrong ranges.
        int start = first + step * i;
        int stop = first + step * (i + 1);
        if (i == n_threads - 1)
            stop = last; // last task absorbs the remainder
        vec.push_back({start, stop});
    }
    return vec;
}

} // namespace aare